author		David S. Miller <davem@davemloft.net>	2015-03-03 21:16:48 -0500
committer	David S. Miller <davem@davemloft.net>	2015-03-03 21:16:48 -0500
commit		71a83a6db6138b9d41d8a0b6b91cb59f6dc4742c (patch)
tree		f74b6e4e48257ec6ce40b95645ecb8533b9cc1f8
parent		b97526f3ff95f92b107f0fb52cbb8627e395429b (diff)
parent		a6c5170d1edea97c538c81e377e56c7b5c5b7e63 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/rocker/rocker.c

The rocker commit was two overlapping changes, one to rename the
->vport member to ->pport, and another making the bitmask expression
use '1ULL' instead of plain '1'.

Signed-off-by: David S. Miller <davem@davemloft.net>
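The '1ULL' half of that conflict is worth unpacking: shifting a plain int literal left by 32 or more bits is undefined behavior in C, so a port bitmask built with '1 << n' silently breaks once port numbers pass 31. Below is a minimal standalone sketch of the pitfall; the pport value and the mask expression are illustrative, not copied from rocker.c.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int pport = 40;	/* hypothetical port number above 31 */

		/* Broken: '1' is a 32-bit int, so shifting it by pport - 1 = 39
		 * bits is undefined behavior and yields a wrong mask in practice. */
		uint64_t bad = 1 << (pport - 1);

		/* Fixed: '1ULL' performs the shift in 64 bits, which is what the
		 * conflicting rocker.c change switched to. */
		uint64_t good = 1ULL << (pport - 1);

		printf("bad  = 0x%016llx\ngood = 0x%016llx\n",
		       (unsigned long long)bad, (unsigned long long)good);
		return 0;
	}

On x86 the broken form typically evaluates as a shift by 39 % 32 = 7 because of hardware shift-count masking, which is exactly the kind of silently wrong mask the merged change guards against.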
-rw-r--r--Documentation/ABI/testing/sysfs-driver-samsung-laptop8
-rw-r--r--Documentation/ABI/testing/sysfs-driver-toshiba_acpi114
-rw-r--r--Documentation/DocBook/kgdb.tmpl6
-rw-r--r--Documentation/cgroups/unified-hierarchy.txt4
-rw-r--r--Documentation/clk.txt2
-rw-r--r--Documentation/device-mapper/dm-crypt.txt15
-rw-r--r--Documentation/devicetree/bindings/clock/exynos7-clock.txt15
-rw-r--r--Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt10
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,lcc.txt21
-rw-r--r--Documentation/devicetree/bindings/clock/qoriq-clock.txt5
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt1
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,r8a73a4-cpg-clocks.txt33
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt12
-rw-r--r--Documentation/devicetree/bindings/clock/sunxi.txt43
-rw-r--r--Documentation/devicetree/bindings/clock/ti,cdce706.txt42
-rw-r--r--Documentation/devicetree/bindings/clock/ti/fapll.txt33
-rw-r--r--Documentation/devicetree/bindings/dma/img-mdc-dma.txt57
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt3
-rw-r--r--Documentation/devicetree/bindings/dma/snps-dma.txt2
-rw-r--r--Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt37
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt3
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-ocores.txt42
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-rk3x.txt14
-rw-r--r--Documentation/devicetree/bindings/i2c/trivial-devices.txt5
-rw-r--r--Documentation/devicetree/bindings/mfd/da9063.txt93
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom-rpm.txt70
-rw-r--r--Documentation/devicetree/bindings/mips/cavium/cib.txt43
-rw-r--r--Documentation/devicetree/bindings/mmc/sunxi-mmc.txt8
-rw-r--r--Documentation/devicetree/bindings/mtd/atmel-nand.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/fsl-quadspi.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/gpmi-nand.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/hisi504-nand.txt47
-rw-r--r--Documentation/devicetree/bindings/mtd/mtd-physmap.txt5
-rw-r--r--Documentation/devicetree/bindings/net/amd-xgbe-phy.txt4
-rw-r--r--Documentation/devicetree/bindings/pwm/img-pwm.txt24
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-sun4i.txt20
-rw-r--r--Documentation/devicetree/bindings/thermal/exynos-thermal.txt21
-rw-r--r--Documentation/devicetree/bindings/thermal/thermal.txt74
-rw-r--r--Documentation/devicetree/bindings/watchdog/gpio-wdt.txt5
-rw-r--r--Documentation/devicetree/bindings/watchdog/imgpdc-wdt.txt19
-rw-r--r--Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt12
-rw-r--r--Documentation/devicetree/bindings/watchdog/mtk-wdt.txt13
-rw-r--r--Documentation/dmaengine/provider.txt97
-rw-r--r--Documentation/filesystems/Locking2
-rw-r--r--Documentation/filesystems/dlmfs.txt4
-rw-r--r--Documentation/filesystems/ocfs2.txt4
-rw-r--r--Documentation/filesystems/overlayfs.txt28
-rw-r--r--Documentation/i2c/functionality2
-rw-r--r--Documentation/ia64/paravirt_ops.txt137
-rw-r--r--Documentation/input/alps.txt68
-rw-r--r--Documentation/kbuild/makefiles.txt20
-rw-r--r--Documentation/virtual/00-INDEX3
-rw-r--r--Documentation/virtual/paravirt_ops.txt32
-rw-r--r--Documentation/x86/zero-page.txt2
-rw-r--r--Kbuild61
-rw-r--r--MAINTAINERS30
-rw-r--r--Makefile12
-rw-r--r--arch/alpha/include/asm/uaccess.h86
-rw-r--r--arch/arc/boot/dts/abilis_tb10x.dtsi2
-rw-r--r--arch/arm/boot/dts/am335x-bone-common.dtsi1
-rw-r--r--arch/arm/boot/dts/am437x-idk-evm.dts25
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15.dts8
-rw-r--r--arch/arm/boot/dts/bcm-cygnus.dtsi20
-rw-r--r--arch/arm/boot/dts/bcm63138.dtsi5
-rw-r--r--arch/arm/boot/dts/dm8168-evm.dts25
-rw-r--r--arch/arm/boot/dts/dm816x.dtsi34
-rw-r--r--arch/arm/boot/dts/dra7-evm.dts8
-rw-r--r--arch/arm/boot/dts/dra7.dtsi8
-rw-r--r--arch/arm/boot/dts/dra72-evm.dts8
-rw-r--r--arch/arm/boot/dts/omap2.dtsi4
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts9
-rw-r--r--arch/arm/boot/dts/omap3.dtsi4
-rw-r--r--arch/arm/boot/dts/omap4.dtsi4
-rw-r--r--arch/arm/boot/dts/omap5.dtsi8
-rw-r--r--arch/arm/boot/dts/spear13xx.dtsi4
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi72
-rw-r--r--arch/arm/boot/dts/sun5i-a10s.dtsi54
-rw-r--r--arch/arm/boot/dts/sun5i-a13.dtsi44
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi86
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi72
-rw-r--r--arch/arm/boot/dts/sun8i-a23.dtsi96
-rw-r--r--arch/arm/configs/multi_v7_defconfig82
-rw-r--r--arch/arm/configs/omap2plus_defconfig4
-rw-r--r--arch/arm/include/asm/uaccess.h96
-rw-r--r--arch/arm/kernel/perf_event_cpu.c2
-rw-r--r--arch/arm/mach-asm9260/Kconfig2
-rw-r--r--arch/arm/mach-at91/Kconfig1
-rw-r--r--arch/arm/mach-at91/at91rm9200_time.c2
-rw-r--r--arch/arm/mach-at91/generic.h8
-rw-r--r--arch/arm/mach-at91/pm.c4
-rw-r--r--arch/arm/mach-axxia/axxia.c2
-rw-r--r--arch/arm/mach-bcm/Kconfig4
-rw-r--r--arch/arm/mach-bcm/brcmstb.c2
-rw-r--r--arch/arm/mach-davinci/Kconfig2
-rw-r--r--arch/arm/mach-davinci/da8xx-dt.c2
-rw-r--r--arch/arm/mach-davinci/mux.c4
-rw-r--r--arch/arm/mach-exynos/exynos.c2
-rw-r--r--arch/arm/mach-exynos/suspend.c2
-rw-r--r--arch/arm/mach-highbank/highbank.c2
-rw-r--r--arch/arm/mach-hisi/hisilicon.c8
-rw-r--r--arch/arm/mach-imx/mmdc.c2
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/io.h19
-rw-r--r--arch/arm/mach-keystone/keystone.c2
-rw-r--r--arch/arm/mach-keystone/pm_domain.c2
-rw-r--r--arch/arm/mach-mmp/time.c2
-rw-r--r--arch/arm/mach-msm/board-halibut.c8
-rw-r--r--arch/arm/mach-msm/board-qsd8x50.c8
-rw-r--r--arch/arm/mach-mvebu/coherency.c2
-rw-r--r--arch/arm/mach-mvebu/pmsu.c2
-rw-r--r--arch/arm/mach-mvebu/system-controller.c2
-rw-r--r--arch/arm/mach-nspire/nspire.c2
-rw-r--r--arch/arm/mach-omap2/Makefile2
-rw-r--r--arch/arm/mach-omap2/cclock3xxx_data.c3688
-rw-r--r--arch/arm/mach-omap2/clock.c16
-rw-r--r--arch/arm/mach-omap2/clock.h14
-rw-r--r--arch/arm/mach-omap2/clock_common_data.c11
-rw-r--r--arch/arm/mach-omap2/dpll3xxx.c13
-rw-r--r--arch/arm/mach-omap2/dpll44xx.c2
-rw-r--r--arch/arm/mach-omap2/io.c26
-rw-r--r--arch/arm/mach-omap2/omap4-common.c2
-rw-r--r--arch/arm/mach-omap2/prm.h1
-rw-r--r--arch/arm/mach-omap2/prm3xxx.c2
-rw-r--r--arch/arm/mach-omap2/prm44xx.c2
-rw-r--r--arch/arm/mach-omap2/prm_common.c11
-rw-r--r--arch/arm/mach-prima2/Kconfig1
-rw-r--r--arch/arm/mach-prima2/common.c6
-rw-r--r--arch/arm/mach-prima2/platsmp.c2
-rw-r--r--arch/arm/mach-pxa/idp.c5
-rw-r--r--arch/arm/mach-pxa/lpd270.c8
-rw-r--r--arch/arm/mach-realview/core.c7
-rw-r--r--arch/arm/mach-realview/realview_eb.c2
-rw-r--r--arch/arm/mach-rockchip/Kconfig1
-rw-r--r--arch/arm/mach-rockchip/pm.h6
-rw-r--r--arch/arm/mach-s5pv210/s5pv210.c2
-rw-r--r--arch/arm/mach-sa1100/neponset.c6
-rw-r--r--arch/arm/mach-sa1100/pleb.c7
-rw-r--r--arch/arm/mach-shmobile/setup-emev2.c2
-rw-r--r--arch/arm/mach-sti/Kconfig1
-rw-r--r--arch/arm/mach-tegra/tegra.c2
-rw-r--r--arch/arm/mach-ux500/pm_domains.c2
-rw-r--r--arch/arm/mach-versatile/versatile_dt.c2
-rw-r--r--arch/arm/mach-vexpress/Kconfig1
-rw-r--r--arch/arm/mm/Kconfig7
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm64/boot/dts/arm/foundation-v8.dts8
-rw-r--r--arch/arm64/boot/dts/arm/juno.dts14
-rw-r--r--arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts8
-rw-r--r--arch/arm64/crypto/Makefile2
-rw-r--r--arch/arm64/include/asm/assembler.h5
-rw-r--r--arch/arm64/include/asm/cpuidle.h2
-rw-r--r--arch/arm64/include/asm/insn.h6
-rw-r--r--arch/arm64/include/asm/pgtable.h2
-rw-r--r--arch/arm64/include/asm/processor.h3
-rw-r--r--arch/arm64/include/asm/tlbflush.h5
-rw-r--r--arch/arm64/include/asm/uaccess.h4
-rw-r--r--arch/arm64/kernel/Makefile5
-rw-r--r--arch/arm64/kernel/ftrace.c2
-rw-r--r--arch/arm64/kernel/insn.c4
-rw-r--r--arch/arm64/kernel/psci-call.S28
-rw-r--r--arch/arm64/kernel/psci.c37
-rw-r--r--arch/arm64/kernel/signal32.c5
-rw-r--r--arch/arm64/kernel/vdso/gettimeofday.S3
-rw-r--r--arch/arm64/mm/dma-mapping.c16
-rw-r--r--arch/arm64/mm/init.c14
-rw-r--r--arch/avr32/include/asm/uaccess.h24
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c2
-rw-r--r--arch/blackfin/include/asm/uaccess.h32
-rw-r--r--arch/blackfin/mach-bf527/boards/ad7160eval.c15
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c15
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c7
-rw-r--r--arch/blackfin/mach-bf609/boards/ezkit.c7
-rw-r--r--arch/frv/include/asm/pgtable.h2
-rw-r--r--arch/frv/include/asm/segment.h2
-rw-r--r--arch/ia64/include/asm/uaccess.h11
-rw-r--r--arch/m32r/include/asm/pgtable-2level.h1
-rw-r--r--arch/m32r/include/asm/uaccess.h88
-rw-r--r--arch/m68k/include/asm/pgtable_mm.h2
-rw-r--r--arch/m68k/include/asm/segment.h2
-rw-r--r--arch/m68k/include/asm/uaccess_mm.h40
-rw-r--r--arch/metag/include/asm/processor.h4
-rw-r--r--arch/metag/include/asm/uaccess.h25
-rw-r--r--arch/mips/Kconfig73
-rw-r--r--arch/mips/Kconfig.debug13
-rw-r--r--arch/mips/Makefile55
-rw-r--r--arch/mips/alchemy/common/clock.c33
-rw-r--r--arch/mips/alchemy/common/setup.c4
-rw-r--r--arch/mips/bcm3384/irq.c2
-rw-r--r--arch/mips/boot/Makefile49
-rw-r--r--arch/mips/boot/elf2ecoff.c4
-rw-r--r--arch/mips/cavium-octeon/csrc-octeon.c11
-rw-r--r--arch/mips/cavium-octeon/dma-octeon.c4
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-board.c2
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c1094
-rw-r--r--arch/mips/cavium-octeon/setup.c56
-rw-r--r--arch/mips/configs/malta_qemu_32r6_defconfig193
-rw-r--r--arch/mips/fw/arc/misc.c26
-rw-r--r--arch/mips/include/asm/Kbuild1
-rw-r--r--arch/mips/include/asm/asmmacro.h18
-rw-r--r--arch/mips/include/asm/atomic.h42
-rw-r--r--arch/mips/include/asm/bitops.h64
-rw-r--r--arch/mips/include/asm/checksum.h45
-rw-r--r--arch/mips/include/asm/cmpxchg.h34
-rw-r--r--arch/mips/include/asm/compiler.h24
-rw-r--r--arch/mips/include/asm/cpu-features.h28
-rw-r--r--arch/mips/include/asm/cpu-info.h5
-rw-r--r--arch/mips/include/asm/cpu-type.h7
-rw-r--r--arch/mips/include/asm/cpu.h11
-rw-r--r--arch/mips/include/asm/edac.h4
-rw-r--r--arch/mips/include/asm/elf.h10
-rw-r--r--arch/mips/include/asm/fpu.h3
-rw-r--r--arch/mips/include/asm/futex.h24
-rw-r--r--arch/mips/include/asm/gio_device.h2
-rw-r--r--arch/mips/include/asm/hazards.h9
-rw-r--r--arch/mips/include/asm/irqflags.h7
-rw-r--r--arch/mips/include/asm/local.h5
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h64
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/war.h3
-rw-r--r--arch/mips/include/asm/mach-jz4740/jz4740_nand.h2
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h24
-rw-r--r--arch/mips/include/asm/mips-r2-to-r6-emul.h96
-rw-r--r--arch/mips/include/asm/mipsregs.h4
-rw-r--r--arch/mips/include/asm/mmu.h3
-rw-r--r--arch/mips/include/asm/mmu_context.h9
-rw-r--r--arch/mips/include/asm/module.h4
-rw-r--r--arch/mips/include/asm/octeon/cvmx-cmd-queue.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-rst-defs.h306
-rw-r--r--arch/mips/include/asm/octeon/octeon-model.h107
-rw-r--r--arch/mips/include/asm/octeon/octeon.h148
-rw-r--r--arch/mips/include/asm/pci.h2
-rw-r--r--arch/mips/include/asm/pgtable-bits.h83
-rw-r--r--arch/mips/include/asm/pgtable.h46
-rw-r--r--arch/mips/include/asm/processor.h19
-rw-r--r--arch/mips/include/asm/prom.h7
-rw-r--r--arch/mips/include/asm/ptrace.h4
-rw-r--r--arch/mips/include/asm/r4kcache.h150
-rw-r--r--arch/mips/include/asm/sgialib.h8
-rw-r--r--arch/mips/include/asm/siginfo.h29
-rw-r--r--arch/mips/include/asm/spinlock.h55
-rw-r--r--arch/mips/include/asm/spram.h4
-rw-r--r--arch/mips/include/asm/stackframe.h8
-rw-r--r--arch/mips/include/asm/switch_to.h9
-rw-r--r--arch/mips/include/asm/thread_info.h2
-rw-r--r--arch/mips/include/uapi/asm/inst.h24
-rw-r--r--arch/mips/include/uapi/asm/siginfo.h11
-rw-r--r--arch/mips/jz4740/board-qi_lb60.c11
-rw-r--r--arch/mips/kernel/Makefile3
-rw-r--r--arch/mips/kernel/asm-offsets.c2
-rw-r--r--arch/mips/kernel/branch.c288
-rw-r--r--arch/mips/kernel/cevt-r4k.c8
-rw-r--r--arch/mips/kernel/cps-vec.S16
-rw-r--r--arch/mips/kernel/cpu-bugs64.c11
-rw-r--r--arch/mips/kernel/cpu-probe.c33
-rw-r--r--arch/mips/kernel/elf.c301
-rw-r--r--arch/mips/kernel/entry.S23
-rw-r--r--arch/mips/kernel/genex.S2
-rw-r--r--arch/mips/kernel/idle.c1
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c2378
-rw-r--r--arch/mips/kernel/mips_ksyms.c12
-rw-r--r--arch/mips/kernel/octeon_switch.S218
-rw-r--r--arch/mips/kernel/proc.c8
-rw-r--r--arch/mips/kernel/process.c96
-rw-r--r--arch/mips/kernel/r4k_fpu.S12
-rw-r--r--arch/mips/kernel/r4k_switch.S14
-rw-r--r--arch/mips/kernel/spram.c1
-rw-r--r--arch/mips/kernel/syscall.c2
-rw-r--r--arch/mips/kernel/traps.c60
-rw-r--r--arch/mips/kernel/unaligned.c390
-rw-r--r--arch/mips/lib/Makefile1
-rw-r--r--arch/mips/lib/memcpy.S23
-rw-r--r--arch/mips/lib/memset.S47
-rw-r--r--arch/mips/lib/mips-atomic.c2
-rw-r--r--arch/mips/math-emu/cp1emu.c169
-rw-r--r--arch/mips/mm/c-r4k.c6
-rw-r--r--arch/mips/mm/fault.c29
-rw-r--r--arch/mips/mm/page.c30
-rw-r--r--arch/mips/mm/sc-mips.c4
-rw-r--r--arch/mips/mm/tlb-r4k.c8
-rw-r--r--arch/mips/mm/tlbex.c7
-rw-r--r--arch/mips/mm/uasm-micromips.c8
-rw-r--r--arch/mips/mm/uasm-mips.c38
-rw-r--r--arch/mips/mm/uasm.c15
-rw-r--r--arch/mips/mti-sead3/sead3-time.c2
-rw-r--r--arch/mips/pci/pci-bcm1480.c4
-rw-r--r--arch/mips/pci/pci-octeon.c4
-rw-r--r--arch/mips/pci/pcie-octeon.c12
-rw-r--r--arch/mips/pmcs-msp71xx/Kconfig6
-rw-r--r--arch/mips/sgi-ip22/ip22-gio.c24
-rw-r--r--arch/mips/sgi-ip27/ip27-reset.c7
-rw-r--r--arch/mips/sgi-ip32/ip32-reset.c7
-rw-r--r--arch/mn10300/include/asm/pgtable.h2
-rw-r--r--arch/mn10300/unit-asb2305/pci-iomap.c35
-rw-r--r--arch/openrisc/include/asm/uaccess.h4
-rw-r--r--arch/parisc/Makefile2
-rw-r--r--arch/parisc/include/asm/pgtable.h1
-rw-r--r--arch/powerpc/Makefile6
-rw-r--r--arch/powerpc/configs/corenet32_smp_defconfig1
-rw-r--r--arch/powerpc/configs/corenet64_smp_defconfig1
-rw-r--r--arch/powerpc/kernel/time.c5
-rw-r--r--arch/powerpc/platforms/512x/clock-commonclk.c11
-rw-r--r--arch/s390/hypfs/inode.c53
-rw-r--r--arch/s390/include/asm/pci_io.h1
-rw-r--r--arch/s390/include/asm/pgtable.h2
-rw-r--r--arch/s390/include/asm/topology.h24
-rw-r--r--arch/s390/kernel/cache.c25
-rw-r--r--arch/s390/kernel/early.c12
-rw-r--r--arch/s390/kernel/setup.c1
-rw-r--r--arch/s390/kernel/smp.c54
-rw-r--r--arch/s390/kernel/topology.c134
-rw-r--r--arch/s390/kernel/vdso64/clock_gettime.S6
-rw-r--r--arch/s390/mm/mmap.c5
-rw-r--r--arch/s390/pci/pci.c34
-rw-r--r--arch/sh/include/asm/segment.h2
-rw-r--r--arch/sh/include/asm/uaccess.h4
-rw-r--r--arch/sh/include/asm/uaccess_64.h8
-rw-r--r--arch/sparc/include/asm/uaccess_32.h339
-rw-r--r--arch/sparc/include/asm/uaccess_64.h222
-rw-r--r--arch/x86/Kconfig16
-rw-r--r--arch/x86/Kconfig.debug13
-rw-r--r--arch/x86/Makefile.um2
-rw-r--r--arch/x86/boot/compressed/Makefile1
-rw-r--r--arch/x86/boot/compressed/aslr.c34
-rw-r--r--arch/x86/boot/compressed/efi_stub_64.S25
-rw-r--r--arch/x86/boot/compressed/efi_thunk_64.S196
-rw-r--r--arch/x86/boot/compressed/misc.c3
-rw-r--r--arch/x86/boot/compressed/misc.h6
-rw-r--r--arch/x86/include/asm/apic.h8
-rw-r--r--arch/x86/include/asm/imr.h60
-rw-r--r--arch/x86/include/asm/lguest_hcall.h1
-rw-r--r--arch/x86/include/asm/page_types.h2
-rw-r--r--arch/x86/include/asm/pgtable.h6
-rw-r--r--arch/x86/include/asm/spinlock.h90
-rw-r--r--arch/x86/include/asm/uaccess.h2
-rw-r--r--arch/x86/include/uapi/asm/bootparam.h1
-rw-r--r--arch/x86/kernel/acpi/boot.c5
-rw-r--r--arch/x86/kernel/cpu/common.c6
-rw-r--r--arch/x86/kernel/cpu/intel.c4
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c5
-rw-r--r--arch/x86/kernel/cpu/microcode/intel_early.c6
-rw-r--r--arch/x86/kernel/entry_32.S3
-rw-r--r--arch/x86/kernel/entry_64.S3
-rw-r--r--arch/x86/kernel/irq.c3
-rw-r--r--arch/x86/kernel/kprobes/core.c56
-rw-r--r--arch/x86/kernel/kprobes/opt.c2
-rw-r--r--arch/x86/kernel/kvm.c13
-rw-r--r--arch/x86/kernel/module.c10
-rw-r--r--arch/x86/kernel/setup.c22
-rw-r--r--arch/x86/kernel/uprobes.c153
-rw-r--r--arch/x86/lguest/Kconfig4
-rw-r--r--arch/x86/lguest/boot.c173
-rw-r--r--arch/x86/mm/init.c28
-rw-r--r--arch/x86/mm/mmap.c6
-rw-r--r--arch/x86/platform/Makefile1
-rw-r--r--arch/x86/platform/efi/efi_stub_64.S161
-rw-r--r--arch/x86/platform/efi/efi_thunk_64.S121
-rw-r--r--arch/x86/platform/intel-mid/intel-mid.c2
-rw-r--r--arch/x86/platform/intel-quark/Makefile2
-rw-r--r--arch/x86/platform/intel-quark/imr.c661
-rw-r--r--arch/x86/platform/intel-quark/imr_selftest.c129
-rw-r--r--arch/x86/xen/enlighten.c20
-rw-r--r--arch/x86/xen/spinlock.c13
-rw-r--r--arch/xtensa/include/asm/uaccess.h90
-rw-r--r--block/blk-throttle.c3
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpi_lpat.c161
-rw-r--r--drivers/acpi/acpi_lpss.c21
-rw-r--r--drivers/acpi/ec.c4
-rw-r--r--drivers/acpi/pmic/intel_pmic.c133
-rw-r--r--drivers/acpi/resource.c2
-rw-r--r--drivers/acpi/video.c9
-rw-r--r--drivers/block/nvme-core.c513
-rw-r--r--drivers/block/nvme-scsi.c96
-rw-r--r--drivers/block/rbd.c193
-rw-r--r--drivers/block/virtio_blk.c12
-rw-r--r--drivers/block/zram/zram_drv.c2
-rw-r--r--drivers/bluetooth/btusb.c1
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c6
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c102
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c121
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c6
-rw-r--r--drivers/char/virtio_console.c5
-rw-r--r--drivers/clk/Kconfig18
-rw-r--r--drivers/clk/Makefile4
-rw-r--r--drivers/clk/at91/clk-programmable.c2
-rw-r--r--drivers/clk/bcm/clk-kona.c2
-rw-r--r--drivers/clk/clk-asm9260.c348
-rw-r--r--drivers/clk/clk-cdce706.c700
-rw-r--r--drivers/clk/clk-composite.c29
-rw-r--r--drivers/clk/clk-divider.c228
-rw-r--r--drivers/clk/clk-gate.c18
-rw-r--r--drivers/clk/clk-mux.c16
-rw-r--r--drivers/clk/clk-qoriq.c (renamed from drivers/clk/clk-ppc-corenet.c)178
-rw-r--r--drivers/clk/clk.c1009
-rw-r--r--drivers/clk/clk.h24
-rw-r--r--drivers/clk/clkdev.c110
-rw-r--r--drivers/clk/hisilicon/clk-hi3620.c2
-rw-r--r--drivers/clk/mmp/clk-mix.c2
-rw-r--r--drivers/clk/pxa/Makefile1
-rw-r--r--drivers/clk/pxa/clk-pxa.c2
-rw-r--r--drivers/clk/pxa/clk-pxa3xx.c364
-rw-r--r--drivers/clk/qcom/Kconfig18
-rw-r--r--drivers/clk/qcom/Makefile4
-rw-r--r--drivers/clk/qcom/clk-pll.c1
-rw-r--r--drivers/clk/qcom/clk-rcg.c10
-rw-r--r--drivers/clk/qcom/clk-rcg2.c6
-rw-r--r--drivers/clk/qcom/clk-regmap-divider.c70
-rw-r--r--drivers/clk/qcom/clk-regmap-divider.h29
-rw-r--r--drivers/clk/qcom/clk-regmap-mux.c59
-rw-r--r--drivers/clk/qcom/clk-regmap-mux.h29
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c12
-rw-r--r--drivers/clk/qcom/lcc-ipq806x.c473
-rw-r--r--drivers/clk/qcom/lcc-msm8960.c585
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c48
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c32
-rw-r--r--drivers/clk/samsung/clk-exynos3250.c217
-rw-r--r--drivers/clk/samsung/clk-exynos4.c10
-rw-r--r--drivers/clk/samsung/clk-exynos4415.c216
-rw-r--r--drivers/clk/samsung/clk-exynos7.c408
-rw-r--r--drivers/clk/samsung/clk.c13
-rw-r--r--drivers/clk/samsung/clk.h3
-rw-r--r--drivers/clk/shmobile/Makefile2
-rw-r--r--drivers/clk/shmobile/clk-div6.c18
-rw-r--r--drivers/clk/shmobile/clk-r8a73a4.c241
-rw-r--r--drivers/clk/shmobile/clk-rcar-gen2.c88
-rw-r--r--drivers/clk/st/clk-flexgen.c39
-rw-r--r--drivers/clk/st/clkgen-mux.c14
-rw-r--r--drivers/clk/sunxi/Makefile1
-rw-r--r--drivers/clk/sunxi/clk-factors.c12
-rw-r--r--drivers/clk/sunxi/clk-factors.h7
-rw-r--r--drivers/clk/sunxi/clk-mod0.c224
-rw-r--r--drivers/clk/sunxi/clk-sun6i-ar100.c2
-rw-r--r--drivers/clk/sunxi/clk-sun8i-mbus.c13
-rw-r--r--drivers/clk/sunxi/clk-sun9i-core.c119
-rw-r--r--drivers/clk/sunxi/clk-sun9i-mmc.c219
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c262
-rw-r--r--drivers/clk/tegra/Makefile1
-rw-r--r--drivers/clk/tegra/clk-id.h2
-rw-r--r--drivers/clk/tegra/clk-periph.c14
-rw-r--r--drivers/clk/tegra/clk-pll.c18
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c18
-rw-r--r--drivers/clk/tegra/clk-tegra114.c10
-rw-r--r--drivers/clk/tegra/clk-tegra124.c168
-rw-r--r--drivers/clk/tegra/clk.c7
-rw-r--r--drivers/clk/ti/Makefile8
-rw-r--r--drivers/clk/ti/clk-3xxx-legacy.c4653
-rw-r--r--drivers/clk/ti/clk-3xxx.c8
-rw-r--r--drivers/clk/ti/clk-44xx.c2
-rw-r--r--drivers/clk/ti/clk-54xx.c2
-rw-r--r--drivers/clk/ti/clk-7xx.c2
-rw-r--r--drivers/clk/ti/clk-816x.c53
-rw-r--r--drivers/clk/ti/clk.c127
-rw-r--r--drivers/clk/ti/clock.h172
-rw-r--r--drivers/clk/ti/composite.c48
-rw-r--r--drivers/clk/ti/divider.c132
-rw-r--r--drivers/clk/ti/dpll.c121
-rw-r--r--drivers/clk/ti/fapll.c410
-rw-r--r--drivers/clk/ti/gate.c163
-rw-r--r--drivers/clk/ti/interface.c98
-rw-r--r--drivers/clk/ti/mux.c70
-rw-r--r--drivers/clk/ux500/clk-prcc.c1
-rw-r--r--drivers/clk/ux500/clk-prcmu.c1
-rw-r--r--drivers/clk/zynq/clkc.c1
-rw-r--r--drivers/clocksource/Kconfig16
-rw-r--r--drivers/clocksource/mtk_timer.c9
-rw-r--r--drivers/clocksource/pxa_timer.c2
-rw-r--r--drivers/connector/Kconfig2
-rw-r--r--drivers/cpufreq/Kconfig.arm44
-rw-r--r--drivers/cpufreq/Kconfig.powerpc2
-rw-r--r--drivers/cpufreq/Makefile9
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c33
-rw-r--r--drivers/cpufreq/s3c2416-cpufreq.c4
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq.c10
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c84
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c4
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c2
-rw-r--r--drivers/dma/Kconfig9
-rw-r--r--drivers/dma/Makefile3
-rw-r--r--drivers/dma/amba-pl08x.c156
-rw-r--r--drivers/dma/at_hdmac.c130
-rw-r--r--drivers/dma/at_hdmac_regs.h3
-rw-r--r--drivers/dma/at_xdmac.c186
-rw-r--r--drivers/dma/bcm2835-dma.c46
-rw-r--r--drivers/dma/coh901318.c153
-rw-r--r--drivers/dma/cppi41.c30
-rw-r--r--drivers/dma/dma-jz4740.c20
-rw-r--r--drivers/dma/dmaengine.c84
-rw-r--r--drivers/dma/dmatest.c35
-rw-r--r--drivers/dma/dw/core.c101
-rw-r--r--drivers/dma/dw/platform.c4
-rw-r--r--drivers/dma/dw/regs.h4
-rw-r--r--drivers/dma/edma.c73
-rw-r--r--drivers/dma/ep93xx_dma.c43
-rw-r--r--drivers/dma/fsl-edma.c123
-rw-r--r--drivers/dma/fsldma.c97
-rw-r--r--drivers/dma/fsldma.h4
-rw-r--r--drivers/dma/img-mdc-dma.c1011
-rw-r--r--drivers/dma/imx-dma.c108
-rw-r--r--drivers/dma/imx-sdma.c150
-rw-r--r--drivers/dma/intel_mid_dma.c25
-rw-r--r--drivers/dma/ioat/dma_v3.c25
-rw-r--r--drivers/dma/ioat/hw.h5
-rw-r--r--drivers/dma/ioat/pci.c5
-rw-r--r--drivers/dma/ipu/ipu_idmac.c96
-rw-r--r--drivers/dma/k3dma.c203
-rw-r--r--drivers/dma/mmp_pdma.c109
-rw-r--r--drivers/dma/mmp_tdma.c85
-rw-r--r--drivers/dma/moxart-dma.c25
-rw-r--r--drivers/dma/mpc512x_dma.c111
-rw-r--r--drivers/dma/mv_xor.c9
-rw-r--r--drivers/dma/mxs-dma.c65
-rw-r--r--drivers/dma/nbpfaxi.c112
-rw-r--r--drivers/dma/of-dma.c4
-rw-r--r--drivers/dma/omap-dma.c69
-rw-r--r--drivers/dma/pch_dma.c8
-rw-r--r--drivers/dma/pl330.c230
-rw-r--r--drivers/dma/qcom_bam_dma.c85
-rw-r--r--drivers/dma/s3c24xx-dma.c73
-rw-r--r--drivers/dma/sa11x0-dma.c157
-rw-r--r--drivers/dma/sh/Kconfig14
-rw-r--r--drivers/dma/sh/Makefile1
-rw-r--r--drivers/dma/sh/rcar-dmac.c1770
-rw-r--r--drivers/dma/sh/rcar-hpbdma.c6
-rw-r--r--drivers/dma/sh/shdma-base.c72
-rw-r--r--drivers/dma/sh/shdmac.c23
-rw-r--r--drivers/dma/sirf-dma.c59
-rw-r--r--drivers/dma/ste_dma40.c63
-rw-r--r--drivers/dma/sun6i-dma.c160
-rw-r--r--drivers/dma/tegra20-apb-dma.c42
-rw-r--r--drivers/dma/timb_dma.c8
-rw-r--r--drivers/dma/txx9dmac.c9
-rw-r--r--drivers/dma/xilinx/xilinx_vdma.c29
-rw-r--r--drivers/edac/amd64_edac.c10
-rw-r--r--drivers/edac/sb_edac.c9
-rw-r--r--drivers/firewire/core-transaction.c4
-rw-r--r--drivers/firewire/ohci.c5
-rw-r--r--drivers/firewire/sbp2.c11
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c16
-rw-r--r--drivers/gpio/gpio-tps65912.c14
-rw-r--r--drivers/gpio/gpiolib-of.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c10
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c3
-rw-r--r--drivers/gpu/drm/drm_crtc.c3
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h15
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c7
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c22
-rw-r--r--drivers/gpu/drm/i915/intel_display.c34
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c8
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c7
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c21
-rw-r--r--drivers/gpu/drm/radeon/cik.c8
-rw-r--r--drivers/gpu/drm/radeon/cikd.h4
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c7
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h4
-rw-r--r--drivers/gpu/drm/radeon/ni.c10
-rw-r--r--drivers/gpu/drm/radeon/nid.h4
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c6
-rw-r--r--drivers/gpu/drm/radeon/si.c22
-rw-r--r--drivers/gpu/drm/radeon/sid.h4
-rw-r--r--drivers/gpu/drm/tegra/dc.c79
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c8
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-ids.h2
-rw-r--r--drivers/hid/hid-microsoft.c2
-rw-r--r--drivers/hid/hid-saitek.c2
-rw-r--r--drivers/hid/hid-sensor-hub.c8
-rw-r--r--drivers/hid/hid-sony.c6
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c7
-rw-r--r--drivers/hid/wacom_wac.c11
-rw-r--r--drivers/hwmon/Kconfig2
-rw-r--r--drivers/hwmon/ads7828.c3
-rw-r--r--drivers/hwmon/pmbus/Kconfig2
-rw-r--r--drivers/i2c/Kconfig4
-rw-r--r--drivers/i2c/busses/Kconfig22
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c461
-rw-r--r--drivers/i2c/busses/i2c-cadence.c189
-rw-r--r--drivers/i2c/busses/i2c-designware-baytrail.c160
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c83
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h12
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c41
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c20
-rw-r--r--drivers/i2c/busses/i2c-imx.c33
-rw-r--r--drivers/i2c/busses/i2c-ocores.c91
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c7
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c99
-rw-r--r--drivers/i2c/busses/i2c-tegra.c2
-rw-r--r--drivers/i2c/i2c-core.c162
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c11
-rw-r--r--drivers/iio/Kconfig4
-rw-r--r--drivers/infiniband/core/ucma.c3
-rw-r--r--drivers/infiniband/core/umem_odp.c3
-rw-r--r--drivers/infiniband/core/uverbs.h1
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c158
-rw-r--r--drivers/infiniband/core/uverbs_main.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c9
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h29
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_kernel.h3
-rw-r--r--drivers/infiniband/hw/ipath/ipath_wc_ppc64.c13
-rw-r--r--drivers/infiniband/hw/ipath/ipath_wc_x86_64.c15
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c7
-rw-r--r--drivers/infiniband/hw/mlx4/main.c11
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c6
-rw-r--r--drivers/infiniband/hw/mlx5/main.c4
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c1
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h38
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c38
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c312
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.h2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c12
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h68
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c241
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.h6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c183
-rw-r--r--drivers/infiniband/hw/qib/qib.h16
-rw-r--r--drivers/infiniband/hw/qib/qib_common.h4
-rw-r--r--drivers/infiniband/hw/qib/qib_debugfs.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_diag.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_eeprom.c198
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c26
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c11
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c15
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c14
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c52
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_keys.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c20
-rw-r--r--drivers/infiniband/hw/qib/qib_mmap.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_mr.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c13
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c28
-rw-r--r--drivers/infiniband/hw/qib/qib_twsi.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_tx.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c15
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs_mcast.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_wc_x86_64.c7
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h4
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c16
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c9
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c27
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c46
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c4
-rw-r--r--drivers/input/joystick/adi.c3
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c4
-rw-r--r--drivers/input/misc/bfin_rotary.c208
-rw-r--r--drivers/input/misc/soc_button_array.c2
-rw-r--r--drivers/input/mouse/alps.c516
-rw-r--r--drivers/input/mouse/alps.h65
-rw-r--r--drivers/input/mouse/cypress_ps2.c5
-rw-r--r--drivers/input/mouse/cypress_ps2.h5
-rw-r--r--drivers/input/mouse/focaltech.c10
-rw-r--r--drivers/input/mouse/focaltech.h1
-rw-r--r--drivers/input/mouse/psmouse-base.c6
-rw-r--r--drivers/input/mouse/synaptics.c10
-rw-r--r--drivers/input/mouse/synaptics.h1
-rw-r--r--drivers/irqchip/irq-mips-gic.c8
-rw-r--r--drivers/isdn/hardware/mISDN/Kconfig2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c2
-rw-r--r--drivers/lguest/Makefile3
-rw-r--r--drivers/lguest/core.c29
-rw-r--r--drivers/lguest/hypercalls.c7
-rw-r--r--drivers/lguest/lg.h26
-rw-r--r--drivers/lguest/lguest_device.c540
-rw-r--r--drivers/lguest/lguest_user.c221
-rw-r--r--drivers/lguest/page_tables.c75
-rw-r--r--drivers/lguest/x86/core.c198
-rw-r--r--drivers/md/Kconfig4
-rw-r--r--drivers/md/dm-crypt.c392
-rw-r--r--drivers/md/dm-io.c6
-rw-r--r--drivers/md/dm-raid1.c9
-rw-r--r--drivers/md/dm-snap.c4
-rw-r--r--drivers/md/dm.c27
-rw-r--r--drivers/md/md.c14
-rw-r--r--drivers/md/persistent-data/Kconfig2
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c4
-rw-r--r--drivers/md/raid1.c5
-rw-r--r--drivers/md/raid5.c13
-rw-r--r--drivers/mfd/88pm860x-core.c2
-rw-r--r--drivers/mfd/Kconfig39
-rw-r--r--drivers/mfd/Makefile4
-rw-r--r--drivers/mfd/da9063-core.c2
-rw-r--r--drivers/mfd/da9063-i2c.c9
-rw-r--r--drivers/mfd/da9150-core.c413
-rw-r--r--drivers/mfd/davinci_voicecodec.c2
-rw-r--r--drivers/mfd/db8500-prcmu.c9
-rw-r--r--drivers/mfd/dln2.c71
-rw-r--r--drivers/mfd/hi6421-pmic-core.c2
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c3
-rw-r--r--drivers/mfd/intel_soc_pmic_core.h2
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c2
-rw-r--r--drivers/mfd/lm3533-core.c2
-rw-r--r--drivers/mfd/lpc_sch.c1
-rw-r--r--drivers/mfd/max77686.c29
-rw-r--r--drivers/mfd/mc13xxx-i2c.c2
-rw-r--r--drivers/mfd/mc13xxx-spi.c2
-rw-r--r--drivers/mfd/omap-usb-host.c10
-rw-r--r--drivers/mfd/pcf50633-core.c2
-rw-r--r--drivers/mfd/qcom_rpm.c581
-rw-r--r--drivers/mfd/retu-mfd.c2
-rw-r--r--drivers/mfd/rt5033.c142
-rw-r--r--drivers/mfd/rtsx_usb.c18
-rw-r--r--drivers/mfd/smsc-ece1099.c2
-rw-r--r--drivers/mfd/sun6i-prcm.c14
-rw-r--r--drivers/mfd/tps65217.c2
-rw-r--r--drivers/mfd/tps65218.c2
-rw-r--r--drivers/mfd/twl-core.c8
-rw-r--r--drivers/mfd/twl6040.c4
-rw-r--r--drivers/mfd/wm8994-core.c6
-rw-r--r--drivers/mmc/host/sunxi-mmc.c63
-rw-r--r--drivers/mtd/bcm47xxpart.c43
-rw-r--r--drivers/mtd/chips/map_ram.c1
-rw-r--r--drivers/mtd/chips/map_rom.c13
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c137
-rw-r--r--drivers/mtd/maps/physmap_of.c10
-rw-r--r--drivers/mtd/mtdblock.c10
-rw-r--r--drivers/mtd/mtdconcat.c3
-rw-r--r--drivers/mtd/mtdcore.c28
-rw-r--r--drivers/mtd/nand/Kconfig7
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/ams-delta.c6
-rw-r--r--drivers/mtd/nand/atmel_nand.c31
-rw-r--r--drivers/mtd/nand/denali.c40
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c9
-rw-r--r--drivers/mtd/nand/hisi504_nand.c891
-rw-r--r--drivers/mtd/nand/jz4740_nand.c29
-rw-r--r--drivers/mtd/nand/nand_base.c31
-rw-r--r--drivers/mtd/nand/nandsim.c7
-rw-r--r--drivers/mtd/nand/omap2.c31
-rw-r--r--drivers/mtd/nand/sunxi_nand.c2
-rw-r--r--drivers/mtd/nftlmount.c18
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c93
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c63
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/appletalk/Kconfig2
-rw-r--r--drivers/net/dsa/bcm_sf2.h2
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c7
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c7
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c47
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c175
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c7
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c122
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c57
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c54
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c246
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c24
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c44
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c119
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c143
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c9
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c8
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c32
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c18
-rw-r--r--drivers/net/ethernet/rocker/rocker.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c7
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c9
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h114
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c10
-rw-r--r--drivers/net/ethernet/sun/niu.c6
-rw-r--r--drivers/net/ethernet/ti/Kconfig4
-rw-r--r--drivers/net/ethernet/ti/cpsw.c9
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c5
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c2
-rw-r--r--drivers/net/macvtap.c7
-rw-r--r--drivers/net/phy/amd-xgbe-phy.c82
-rw-r--r--drivers/net/phy/phy.c23
-rw-r--r--drivers/net/team/team.c4
-rw-r--r--drivers/net/usb/Kconfig13
-rw-r--r--drivers/net/usb/asix_devices.c4
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/plusb.c5
-rw-r--r--drivers/net/virtio_net.c6
-rw-r--r--drivers/net/wan/cosa.c12
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c5
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig6
-rw-r--r--drivers/net/xen-netback/netback.c29
-rw-r--r--drivers/of/of_pci.c1
-rw-r--r--drivers/pci/pcie/aer/Kconfig2
-rw-r--r--drivers/platform/x86/Kconfig25
-rw-r--r--drivers/platform/x86/asus-laptop.c97
-rw-r--r--drivers/platform/x86/classmate-laptop.c2
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c7
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c77
-rw-r--r--drivers/platform/x86/samsung-laptop.c146
-rw-r--r--drivers/platform/x86/sony-laptop.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c24
-rw-r--r--drivers/platform/x86/toshiba_acpi.c1025
-rw-r--r--drivers/pnp/resource.c6
-rw-r--r--drivers/pwm/Kconfig24
-rw-r--r--drivers/pwm/Makefile2
-rw-r--r--drivers/pwm/core.c2
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c6
-rw-r--r--drivers/pwm/pwm-img.c249
-rw-r--r--drivers/pwm/pwm-sti.c30
-rw-r--r--drivers/pwm/pwm-sun4i.c366
-rw-r--r--drivers/pwm/pwm-tegra.c2
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c8
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c1
-rw-r--r--drivers/rtc/Kconfig8
-rw-r--r--drivers/rtc/rtc-ds1685.c18
-rw-r--r--drivers/scsi/am53c974.c6
-rw-r--r--drivers/scsi/be2iscsi/be_main.c1
-rw-r--r--drivers/scsi/hpsa.c4
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c4
-rw-r--r--drivers/scsi/sg.c40
-rw-r--r--drivers/scsi/virtio_scsi.c6
-rw-r--r--drivers/scsi/wd719x.c1
-rw-r--r--drivers/sh/pm_runtime.c2
-rw-r--r--drivers/spi/Kconfig6
-rw-r--r--drivers/staging/board/Kconfig2
-rw-r--r--drivers/staging/emxx_udc/Kconfig2
-rw-r--r--drivers/staging/iio/Kconfig4
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c12
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h4
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c12
-rw-r--r--drivers/target/iscsi/iscsi_target.c105
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_datain_values.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_device.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_seq_pdu_list.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tq.c30
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c12
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h1
-rw-r--r--drivers/target/target_core_file.c5
-rw-r--r--drivers/target/target_core_iblock.c5
-rw-r--r--drivers/target/target_core_pr.c25
-rw-r--r--drivers/target/target_core_sbc.c140
-rw-r--r--drivers/target/target_core_spc.c2
-rw-r--r--drivers/thermal/int340x_thermal/Makefile1
-rw-r--r--drivers/thermal/int340x_thermal/int3400_thermal.c14
-rw-r--r--drivers/thermal/int340x_thermal/int3402_thermal.c208
-rw-r--r--drivers/thermal/int340x_thermal/int3403_thermal.c208
-rw-r--r--drivers/thermal/int340x_thermal/int340x_thermal_zone.c276
-rw-r--r--drivers/thermal/int340x_thermal/int340x_thermal_zone.h68
-rw-r--r--drivers/thermal/int340x_thermal/processor_thermal_device.c92
-rw-r--r--drivers/thermal/intel_powerclamp.c1
-rw-r--r--drivers/thermal/intel_soc_dts_thermal.c46
-rw-r--r--drivers/thermal/of-thermal.c3
-rw-r--r--drivers/thermal/rcar_thermal.c26
-rw-r--r--drivers/thermal/rockchip_thermal.c36
-rw-r--r--drivers/thermal/samsung/Kconfig9
-rw-r--r--drivers/thermal/samsung/Makefile2
-rw-r--r--drivers/thermal/samsung/exynos_thermal_common.c427
-rw-r--r--drivers/thermal/samsung/exynos_thermal_common.h106
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c585
-rw-r--r--drivers/thermal/samsung/exynos_tmu.h77
-rw-r--r--drivers/thermal/samsung/exynos_tmu_data.c264
-rw-r--r--drivers/thermal/step_wise.c4
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c2
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c2
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/usb/gadget/Kconfig34
-rw-r--r--drivers/usb/gadget/legacy/Kconfig2
-rw-r--r--drivers/usb/gadget/udc/Kconfig4
-rw-r--r--drivers/usb/phy/Kconfig2
-rw-r--r--drivers/vfio/pci/vfio_pci.c21
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c60
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h1
-rw-r--r--drivers/vfio/vfio.c119
-rw-r--r--drivers/vfio/vfio_iommu_type1.c80
-rw-r--r--drivers/vhost/net.c25
-rw-r--r--drivers/vhost/scsi.c1068
-rw-r--r--drivers/virtio/Kconfig24
-rw-r--r--drivers/virtio/Makefile3
-rw-r--r--drivers/virtio/virtio.c5
-rw-r--r--drivers/virtio/virtio_balloon.c9
-rw-r--r--drivers/virtio/virtio_mmio.c131
-rw-r--r--drivers/virtio/virtio_pci_common.c94
-rw-r--r--drivers/virtio/virtio_pci_common.h43
-rw-r--r--drivers/virtio/virtio_pci_legacy.c76
-rw-r--r--drivers/virtio/virtio_pci_modern.c695
-rw-r--r--drivers/virtio/virtio_ring.c9
-rw-r--r--drivers/watchdog/Kconfig25
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c21
-rw-r--r--drivers/watchdog/da9063_wdt.c32
-rw-r--r--drivers/watchdog/dw_wdt.c32
-rw-r--r--drivers/watchdog/gpio_wdt.c37
-rw-r--r--drivers/watchdog/hpwdt.c2
-rw-r--r--drivers/watchdog/imgpdc_wdt.c289
-rw-r--r--drivers/watchdog/imx2_wdt.c4
-rw-r--r--drivers/watchdog/it87_wdt.c6
-rw-r--r--drivers/watchdog/jz4740_wdt.c10
-rw-r--r--drivers/watchdog/mtk_wdt.c251
-rw-r--r--drivers/watchdog/omap_wdt.c2
-rw-r--r--drivers/watchdog/retu_wdt.c2
-rw-r--r--drivers/watchdog/rt2880_wdt.c9
-rw-r--r--drivers/watchdog/twl4030_wdt.c2
-rw-r--r--drivers/watchdog/w83627hf_wdt.c14
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/preempt.c44
-rw-r--r--drivers/xen/privcmd.c2
-rw-r--r--drivers/xen/xen-scsiback.c14
-rw-r--r--fs/9p/vfs_inode.c2
-rw-r--r--fs/aio.c6
-rw-r--r--fs/autofs4/dev-ioctl.c8
-rw-r--r--fs/autofs4/expire.c2
-rw-r--r--fs/autofs4/root.c6
-rw-r--r--fs/bad_inode.c147
-rw-r--r--fs/binfmt_elf.c5
-rw-r--r--fs/btrfs/backref.c28
-rw-r--r--fs/btrfs/backref.h3
-rw-r--r--fs/btrfs/btrfs_inode.h3
-rw-r--r--fs/btrfs/ctree.c55
-rw-r--r--fs/btrfs/ctree.h39
-rw-r--r--fs/btrfs/delayed-inode.c38
-rw-r--r--fs/btrfs/dev-replace.c25
-rw-r--r--fs/btrfs/disk-io.c102
-rw-r--r--fs/btrfs/disk-io.h6
-rw-r--r--fs/btrfs/extent-tree.c250
-rw-r--r--fs/btrfs/extent_io.c87
-rw-r--r--fs/btrfs/extent_io.h65
-rw-r--r--fs/btrfs/free-space-cache.c13
-rw-r--r--fs/btrfs/inode-item.c9
-rw-r--r--fs/btrfs/inode.c156
-rw-r--r--fs/btrfs/ioctl.c4
-rw-r--r--fs/btrfs/qgroup.c3
-rw-r--r--fs/btrfs/raid56.c103
-rw-r--r--fs/btrfs/raid56.h11
-rw-r--r--fs/btrfs/reada.c19
-rw-r--r--fs/btrfs/relocation.c12
-rw-r--r--fs/btrfs/scrub.c309
-rw-r--r--fs/btrfs/send.c9
-rw-r--r--fs/btrfs/super.c6
-rw-r--r--fs/btrfs/sysfs.c10
-rw-r--r--fs/btrfs/tests/extent-buffer-tests.c2
-rw-r--r--fs/btrfs/tests/extent-io-tests.c3
-rw-r--r--fs/btrfs/tests/inode-tests.c4
-rw-r--r--fs/btrfs/tests/qgroup-tests.c23
-rw-r--r--fs/btrfs/transaction.c27
-rw-r--r--fs/btrfs/transaction.h7
-rw-r--r--fs/btrfs/tree-log.c234
-rw-r--r--fs/btrfs/volumes.c249
-rw-r--r--fs/btrfs/volumes.h18
-rw-r--r--fs/cachefiles/daemon.c4
-rw-r--r--fs/cachefiles/interface.c4
-rw-r--r--fs/cachefiles/namei.c16
-rw-r--r--fs/cachefiles/rdwr.c2
-rw-r--r--fs/ceph/acl.c14
-rw-r--r--fs/ceph/addr.c19
-rw-r--r--fs/ceph/caps.c127
-rw-r--r--fs/ceph/dir.c35
-rw-r--r--fs/ceph/file.c39
-rw-r--r--fs/ceph/inode.c41
-rw-r--r--fs/ceph/locks.c9
-rw-r--r--fs/ceph/mds_client.c127
-rw-r--r--fs/ceph/mds_client.h2
-rw-r--r--fs/ceph/snap.c54
-rw-r--r--fs/ceph/super.c4
-rw-r--r--fs/ceph/super.h5
-rw-r--r--fs/cifs/file.c14
-rw-r--r--fs/coda/dir.c2
-rw-r--r--fs/configfs/configfs_internal.h3
-rw-r--r--fs/configfs/dir.c72
-rw-r--r--fs/configfs/file.c28
-rw-r--r--fs/configfs/inode.c12
-rw-r--r--fs/coredump.c2
-rw-r--r--fs/dcache.c37
-rw-r--r--fs/debugfs/inode.c36
-rw-r--r--fs/ecryptfs/file.c2
-rw-r--r--fs/ecryptfs/inode.c4
-rw-r--r--fs/exportfs/expfs.c2
-rw-r--r--fs/ext4/ext4.h18
-rw-r--r--fs/ext4/indirect.c105
-rw-r--r--fs/ext4/inode.c7
-rw-r--r--fs/ext4/super.c31
-rw-r--r--fs/fs-writeback.c6
-rw-r--r--fs/fuse/dir.c2
-rw-r--r--fs/gfs2/dir.c2
-rw-r--r--fs/hfsplus/dir.c2
-rw-r--r--fs/hppfs/hppfs.c4
-rw-r--r--fs/internal.h2
-rw-r--r--fs/jbd2/recovery.c3
-rw-r--r--fs/jffs2/compr_rubin.c5
-rw-r--r--fs/jffs2/dir.c14
-rw-r--r--fs/jffs2/scan.c5
-rw-r--r--fs/jffs2/super.c2
-rw-r--r--fs/libfs.c2
-rw-r--r--fs/locks.c59
-rw-r--r--fs/namei.c2
-rw-r--r--fs/namespace.c10
-rw-r--r--fs/nfs/callback_proc.c2
-rw-r--r--fs/nfs/callback_xdr.c8
-rw-r--r--fs/nfs/delegation.c4
-rw-r--r--fs/nfs/direct.c2
-rw-r--r--fs/nfs/filelayout/filelayout.c53
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c43
-rw-r--r--fs/nfs/inode.c1
-rw-r--r--fs/nfs/internal.h13
-rw-r--r--fs/nfs/nfs4proc.c75
-rw-r--r--fs/nfs/nfs4session.c2
-rw-r--r--fs/nfs/nfs4session.h6
-rw-r--r--fs/nfs/nfs4xdr.c32
-rw-r--r--fs/nfs/pnfs.h4
-rw-r--r--fs/nfs/pnfs_nfs.c30
-rw-r--r--fs/nfs/write.c16
-rw-r--r--fs/nfsd/nfs4recover.c4
-rw-r--r--fs/nfsd/nfs4state.c2
-rw-r--r--fs/nfsd/nfsfh.c8
-rw-r--r--fs/nfsd/vfs.c8
-rw-r--r--fs/nilfs2/btree.c47
-rw-r--r--fs/notify/fanotify/fanotify.c6
-rw-r--r--fs/overlayfs/copy_up.c5
-rw-r--r--fs/overlayfs/dir.c34
-rw-r--r--fs/overlayfs/inode.c12
-rw-r--r--fs/overlayfs/overlayfs.h18
-rw-r--r--fs/overlayfs/readdir.c181
-rw-r--r--fs/overlayfs/super.c564
-rw-r--r--fs/posix_acl.c18
-rw-r--r--fs/proc/generic.c12
-rw-r--r--fs/proc/inode.c21
-rw-r--r--fs/proc/internal.h1
-rw-r--r--fs/reiserfs/xattr.c4
-rw-r--r--fs/super.c40
-rw-r--r--fs/xfs/Makefile1
-rw-r--r--fs/xfs/xfs_export.c6
-rw-r--r--fs/xfs/xfs_file.c28
-rw-r--r--fs/xfs/xfs_fsops.c6
-rw-r--r--fs/xfs/xfs_inode.c4
-rw-r--r--fs/xfs/xfs_inode.h9
-rw-r--r--fs/xfs/xfs_ioctl.c11
-rw-r--r--fs/xfs/xfs_iops.c49
-rw-r--r--fs/xfs/xfs_iops.h1
-rw-r--r--fs/xfs/xfs_mount.h11
-rw-r--r--fs/xfs/xfs_pnfs.c324
-rw-r--r--fs/xfs/xfs_pnfs.h18
-rw-r--r--fs/xfs/xfs_qm.c5
-rw-r--r--include/acpi/acpi_lpat.h65
-rw-r--r--include/asm-generic/pci_iomap.h10
-rw-r--r--include/drm/i915_pciids.h4
-rw-r--r--include/dt-bindings/clock/alphascale,asm9260.h97
-rw-r--r--include/dt-bindings/clock/exynos4.h7
-rw-r--r--include/dt-bindings/clock/exynos7-clk.h88
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq806x.h1
-rw-r--r--include/dt-bindings/clock/qcom,lcc-ipq806x.h30
-rw-r--r--include/dt-bindings/clock/qcom,lcc-msm8960.h50
-rw-r--r--include/dt-bindings/clock/tegra124-car-common.h345
-rw-r--r--include/dt-bindings/clock/tegra124-car.h345
-rw-r--r--include/dt-bindings/mfd/qcom-rpm.h154
-rw-r--r--include/dt-bindings/thermal/thermal_exynos.h (renamed from include/linux/clk/sunxi.h)20
-rw-r--r--include/linux/bcm47xx_wdt.h1
-rw-r--r--include/linux/ceph/ceph_fs.h37
-rw-r--r--include/linux/ceph/libceph.h3
-rw-r--r--include/linux/ceph/messenger.h4
-rw-r--r--include/linux/ceph/mon_client.h9
-rw-r--r--include/linux/clk-private.h220
-rw-r--r--include/linux/clk-provider.h58
-rw-r--r--include/linux/clk.h45
-rw-r--r--include/linux/clk/tegra.h2
-rw-r--r--include/linux/clk/ti.h25
-rw-r--r--include/linux/compiler.h6
-rw-r--r--include/linux/dcache.h103
-rw-r--r--include/linux/dmaengine.h120
-rw-r--r--include/linux/fs.h3
-rw-r--r--include/linux/hid-sensor-hub.h5
-rw-r--r--include/linux/i2c.h4
-rw-r--r--include/linux/irqchip/mips-gic.h1
-rw-r--r--include/linux/kdb.h8
-rw-r--r--include/linux/lguest_launcher.h61
-rw-r--r--include/linux/mfd/axp20x.h43
-rw-r--r--include/linux/mfd/da9063/core.h1
-rw-r--r--include/linux/mfd/da9150/core.h68
-rw-r--r--include/linux/mfd/da9150/registers.h1155
-rw-r--r--include/linux/mfd/max77686-private.h1
-rw-r--r--include/linux/mfd/max77686.h28
-rw-r--r--include/linux/mfd/qcom_rpm.h13
-rw-r--r--include/linux/mfd/rt5033-private.h260
-rw-r--r--include/linux/mfd/rt5033.h62
-rw-r--r--include/linux/mlx4/qp.h2
-rw-r--r--include/linux/mtd/mtd.h1
-rw-r--r--include/linux/mtd/spi-nor.h7
-rw-r--r--include/linux/netdevice.h1
-rw-r--r--include/linux/nfs_fs.h1
-rw-r--r--include/linux/nfs_xdr.h19
-rw-r--r--include/linux/nvme.h9
-rw-r--r--include/linux/platform_data/bfin_rotary.h (renamed from arch/blackfin/include/asm/bfin_rotary.h)1
-rw-r--r--include/linux/platform_data/dma-dw.h6
-rw-r--r--include/linux/platform_data/dma-mmp_tdma.h7
-rw-r--r--include/linux/rhashtable.h22
-rw-r--r--include/linux/sched.h10
-rw-r--r--include/linux/sunrpc/metrics.h7
-rw-r--r--include/linux/thermal.h56
-rw-r--r--include/linux/vfio.h2
-rw-r--r--include/linux/virtio_mmio.h44
-rw-r--r--include/net/caif/cfpkt.h2
-rw-r--r--include/target/iscsi/iscsi_target_core.h (renamed from drivers/target/iscsi/iscsi_target_core.h)14
-rw-r--r--include/target/iscsi/iscsi_target_stat.h (renamed from drivers/target/iscsi/iscsi_target_stat.h)0
-rw-r--r--include/target/iscsi/iscsi_transport.h2
-rw-r--r--include/target/target_core_base.h2
-rw-r--r--include/uapi/linux/btrfs.h3
-rw-r--r--include/uapi/linux/nvme.h26
-rw-r--r--include/uapi/linux/prctl.h5
-rw-r--r--include/uapi/linux/tc_act/Kbuild1
-rw-r--r--include/uapi/linux/vfio.h1
-rw-r--r--include/uapi/linux/virtio_balloon.h3
-rw-r--r--include/uapi/linux/virtio_blk.h17
-rw-r--r--include/uapi/linux/virtio_config.h2
-rw-r--r--include/uapi/linux/virtio_net.h42
-rw-r--r--include/uapi/linux/virtio_pci.h93
-rw-r--r--include/uapi/rdma/ib_user_verbs.h23
-rw-r--r--include/xen/xen-ops.h26
-rw-r--r--init/Kconfig8
-rw-r--r--kernel/debug/debug_core.c19
-rw-r--r--kernel/debug/kdb/kdb_io.c46
-rw-r--r--kernel/debug/kdb/kdb_main.c16
-rw-r--r--kernel/debug/kdb/kdb_private.h4
-rw-r--r--kernel/gcov/Makefile36
-rw-r--r--kernel/livepatch/core.c10
-rw-r--r--kernel/locking/rtmutex.c4
-rw-r--r--kernel/printk/printk.c2
-rw-r--r--kernel/rcu/tree_plugin.h1
-rw-r--r--kernel/sched/auto_group.c6
-rw-r--r--kernel/sched/completion.c19
-rw-r--r--kernel/sched/core.c113
-rw-r--r--kernel/sched/deadline.c33
-rw-r--r--kernel/sched/sched.h76
-rw-r--r--kernel/sys.c15
-rw-r--r--kernel/time/ntp.c10
-rw-r--r--lib/Kconfig30
-rw-r--r--lib/pci_iomap.c35
-rw-r--r--lib/rhashtable.c62
-rw-r--r--lib/test_rhashtable.c11
-rw-r--r--mm/Kconfig22
-rw-r--r--mm/memcontrol.c16
-rw-r--r--mm/nommu.c4
-rw-r--r--mm/page_alloc.c9
-rw-r--r--mm/shmem.c7
-rw-r--r--net/9p/trans_virtio.c6
-rw-r--r--net/Kconfig14
-rw-r--r--net/bridge/br.c2
-rw-r--r--net/caif/cffrml.c2
-rw-r--r--net/caif/cfpkt_skbuff.c6
-rw-r--r--net/ceph/ceph_common.c16
-rw-r--r--net/ceph/ceph_strings.c14
-rw-r--r--net/ceph/debugfs.c2
-rw-r--r--net/ceph/messenger.c14
-rw-r--r--net/ceph/mon_client.c139
-rw-r--r--net/ceph/osd_client.c31
-rw-r--r--net/compat.c9
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/ethtool.c1
-rw-r--r--net/core/gen_stats.c15
-rw-r--r--net/core/pktgen.c3
-rw-r--r--net/core/rtnetlink.c15
-rw-r--r--net/core/skbuff.c5
-rw-r--r--net/decnet/dn_route.c2
-rw-r--r--net/hsr/hsr_device.c3
-rw-r--r--net/hsr/hsr_main.c4
-rw-r--r--net/hsr/hsr_slave.c10
-rw-r--r--net/ipv4/ip_fragment.c2
-rw-r--r--net/ipv4/ip_output.c3
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv6/addrconf.c17
-rw-r--r--net/ipv6/ip6_output.c3
-rw-r--r--net/irda/ircomm/ircomm_tty.c2
-rw-r--r--net/irda/irnet/irnet_ppp.c4
-rw-r--r--net/mac80211/chan.c5
-rw-r--r--net/mac80211/rc80211_minstrel.c2
-rw-r--r--net/mac80211/tx.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c2
-rw-r--r--net/netfilter/nft_compat.c12
-rw-r--r--net/netfilter/nft_hash.c2
-rw-r--r--net/netfilter/xt_recent.c11
-rw-r--r--net/netfilter/xt_socket.c21
-rw-r--r--net/netlink/af_netlink.c2
-rw-r--r--net/openvswitch/datapath.c45
-rw-r--r--net/openvswitch/flow_netlink.c8
-rw-r--r--net/openvswitch/vport.h2
-rw-r--r--net/packet/af_packet.c20
-rw-r--r--net/rxrpc/ar-ack.c9
-rw-r--r--net/sched/Kconfig2
-rw-r--r--net/sched/ematch.c1
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_upcall.c2
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c2
-rw-r--r--net/sunrpc/backchannel_rqst.c5
-rw-r--r--net/switchdev/Kconfig2
-rw-r--r--net/tipc/socket.c2
-rw-r--r--net/wireless/core.c1
-rw-r--r--net/wireless/nl80211.c12
-rw-r--r--net/wireless/reg.c2
-rw-r--r--scripts/Kbuild.include7
-rw-r--r--scripts/Makefile.clean3
-rw-r--r--scripts/gdb/linux/__init__.py1
-rw-r--r--scripts/kconfig/confdata.c1
-rwxr-xr-xscripts/kconfig/merge_config.sh5
-rwxr-xr-xscripts/package/builddeb17
-rw-r--r--security/apparmor/include/apparmor.h4
-rw-r--r--security/apparmor/lsm.c20
-rw-r--r--security/apparmor/path.c2
-rw-r--r--security/inode.c2
-rw-r--r--security/integrity/Kconfig4
-rw-r--r--security/integrity/evm/Kconfig2
-rw-r--r--security/selinux/hooks.c8
-rw-r--r--security/smack/smack_lsm.c4
-rw-r--r--security/tomoyo/file.c4
-rw-r--r--sound/core/pcm_native.c2
-rw-r--r--sound/core/seq/seq_midi_emul.c3
-rw-r--r--sound/firewire/amdtp.c5
-rw-r--r--sound/firewire/bebob/bebob.c20
-rw-r--r--sound/firewire/bebob/bebob_stream.c16
-rw-r--r--sound/firewire/dice/dice-stream.c18
-rw-r--r--sound/firewire/dice/dice.c16
-rw-r--r--sound/firewire/fireworks/fireworks.c20
-rw-r--r--sound/firewire/fireworks/fireworks_stream.c19
-rw-r--r--sound/firewire/oxfw/oxfw-stream.c6
-rw-r--r--sound/firewire/oxfw/oxfw.c21
-rw-r--r--sound/pci/hda/hda_controller.c5
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/hda_tegra.c4
-rw-r--r--sound/pci/hda/patch_realtek.c1
-rw-r--r--sound/pci/hda/patch_sigmatel.c17
-rw-r--r--sound/pci/rme9652/hdspm.c6
-rw-r--r--sound/soc/intel/sst-haswell-pcm.c3
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c2
-rw-r--r--sound/usb/clock.c5
-rw-r--r--sound/usb/line6/driver.c14
-rw-r--r--sound/usb/line6/driver.h8
-rw-r--r--sound/usb/quirks.c8
-rw-r--r--sound/usb/quirks.h2
-rw-r--r--tools/lguest/Makefile8
-rw-r--r--tools/lguest/lguest.c2016
-rw-r--r--tools/perf/bench/mem-memcpy.c4
-rw-r--r--tools/perf/config/Makefile.arch4
-rw-r--r--tools/perf/config/feature-checks/Makefile2
-rw-r--r--tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c3
-rw-r--r--tools/perf/util/cloexec.c18
-rw-r--r--tools/perf/util/evlist.h2
-rw-r--r--tools/perf/util/symbol-elf.c5
-rw-r--r--tools/thermal/tmon/.gitignore1
-rw-r--r--tools/thermal/tmon/Makefile15
-rw-r--r--tools/thermal/tmon/tmon.82
-rw-r--r--tools/thermal/tmon/tmon.c14
-rw-r--r--tools/thermal/tmon/tui.c45
1279 files changed, 49533 insertions, 20221 deletions
diff --git a/Documentation/ABI/testing/sysfs-driver-samsung-laptop b/Documentation/ABI/testing/sysfs-driver-samsung-laptop
index 678819a3f8bf..63c1ad0212fc 100644
--- a/Documentation/ABI/testing/sysfs-driver-samsung-laptop
+++ b/Documentation/ABI/testing/sysfs-driver-samsung-laptop
@@ -35,3 +35,11 @@ Contact: Corentin Chary <corentin.chary@gmail.com>
 Description:	Use your USB ports to charge devices, even
 		when your laptop is powered off.
 		1 means enabled, 0 means disabled.
+
+What:		/sys/devices/platform/samsung/lid_handling
+Date:		December 11, 2014
+KernelVersion:	3.19
+Contact:	Julijonas Kikutis <julijonas.kikutis@gmail.com>
+Description:	Some Samsung laptops handle lid closing quicker and
+		only handle lid opening with this mode enabled.
+		1 means enabled, 0 means disabled.
diff --git a/Documentation/ABI/testing/sysfs-driver-toshiba_acpi b/Documentation/ABI/testing/sysfs-driver-toshiba_acpi
new file mode 100644
index 000000000000..ca9c71a531c5
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-toshiba_acpi
@@ -0,0 +1,114 @@
+What:		/sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/kbd_backlight_mode
+Date:		June 8, 2014
+KernelVersion:	3.15
+Contact:	Azael Avalos <coproscefalo@gmail.com>
+Description:	This file controls the keyboard backlight operation mode, valid
+		values are:
+			* 0x1  -> FN-Z
+			* 0x2  -> AUTO (also called TIMER)
+			* 0x8  -> ON
+			* 0x10 -> OFF
+		Note that from kernel 3.16 onwards this file accepts all listed
+		parameters; kernel 3.15 only accepts the first two (FN-Z and
+		AUTO).
+Users:		KToshiba
+
+What:		/sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/kbd_backlight_timeout
+Date:		June 8, 2014
+KernelVersion:	3.15
+Contact:	Azael Avalos <coproscefalo@gmail.com>
+Description:	This file controls the timeout of the keyboard backlight
+		whenever the operation mode is set to AUTO (or TIMER),
+		valid values range from 0-60.
+		Note that kernel 3.15 only supported the first keyboard
+		type; kernel 3.16 added support for the second type, and
+		the range accepted for type 2 is 1-60.
+		See the entry named "kbd_type".
+Users:		KToshiba
+
+What:		/sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/position
+Date:		June 8, 2014
+KernelVersion:	3.15
+Contact:	Azael Avalos <coproscefalo@gmail.com>
+Description:	This file shows the absolute position of the built-in
+		accelerometer.
+
+What:		/sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/touchpad
+Date:		June 8, 2014
+KernelVersion:	3.15
+Contact:	Azael Avalos <coproscefalo@gmail.com>
+Description:	This file controls the status of the touchpad and pointing
+		stick (if available), valid values are:
+			* 0 -> OFF
+			* 1 -> ON
+Users:		KToshiba
+
46What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/available_kbd_modes
47Date: August 3, 2014
48KernelVersion: 3.16
49Contact: Azael Avalos <coproscefalo@gmail.com>
50Description: This file shows the supported keyboard backlight modes
51 the system supports, which can be:
52 * 0x1 -> FN-Z
53 * 0x2 -> AUTO (also called TIMER)
54 * 0x8 -> ON
55 * 0x10 -> OFF
56 Note that not all keyboard types support the listed modes.
57 See the entry named "available_kbd_modes"
58Users: KToshiba
59
60What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/kbd_type
61Date: August 3, 2014
62KernelVersion: 3.16
63Contact: Azael Avalos <coproscefalo@gmail.com>
64Description: This file shows the current keyboard backlight type,
65 which can be:
66 * 1 -> Type 1, supporting modes FN-Z and AUTO
67 * 2 -> Type 2, supporting modes TIMER, ON and OFF
68Users: KToshiba
69
70What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/version
71Date: February, 2015
72KernelVersion: 3.20
73Contact: Azael Avalos <coproscefalo@gmail.com>
74Description: This file shows the current version of the driver
75
76What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/fan
77Date: February, 2015
78KernelVersion: 3.20
79Contact: Azael Avalos <coproscefalo@gmail.com>
80Description: This file controls the state of the internal fan, valid
81 values are:
82 * 0 -> OFF
83 * 1 -> ON
84
85What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/kbd_function_keys
86Date: February, 2015
87KernelVersion: 3.20
88Contact: Azael Avalos <coproscefalo@gmail.com>
89Description: This file controls the Special Functions (hotkeys) operation
90 mode, valid values are:
91 * 0 -> Normal Operation
92 * 1 -> Special Functions
93 In the "Normal Operation" mode, the F{1-12} keys are as usual
94 and the hotkeys are accessed via FN-F{1-12}.
95 In the "Special Functions" mode, the F{1-12} keys trigger the
96 hotkey and the F{1-12} keys are accessed via FN-F{1-12}.
97
98What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/panel_power_on
99Date: February, 2015
100KernelVersion: 3.20
101Contact: Azael Avalos <coproscefalo@gmail.com>
102Description: This file controls whether the laptop should turn ON whenever
103 the LID is opened, valid values are:
104 * 0 -> Disabled
105 * 1 -> Enabled
106
107What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/usb_three
108Date: February, 2015
109KernelVersion: 3.20
110Contact: Azael Avalos <coproscefalo@gmail.com>
111Description: This file controls whether the USB 3 functionality, valid
112 values are:
113 * 0 -> Disabled (Acts as a regular USB 2)
114 * 1 -> Enabled (Full USB 3 functionality)
diff --git a/Documentation/DocBook/kgdb.tmpl b/Documentation/DocBook/kgdb.tmpl
index 2428cc04dbc8..f3abca7ec53d 100644
--- a/Documentation/DocBook/kgdb.tmpl
+++ b/Documentation/DocBook/kgdb.tmpl
@@ -197,6 +197,7 @@
 	may be configured as a kernel built-in or a kernel loadable module.
 	You can only make use of <constant>kgdbwait</constant> and early
 	debugging if you build kgdboc into the kernel as a built-in.
+	</para>
 	<para>Optionally you can elect to activate kms (Kernel Mode
 	Setting) integration. When you use kms with kgdboc and you have a
 	video driver that has atomic mode setting hooks, it is possible to
@@ -206,7 +207,6 @@
 	crashes or doing analysis of memory with kdb while allowing the
 	full graphics console applications to run.
 	</para>
-	</para>
 	<sect2 id="kgdbocArgs">
 	<title>kgdboc arguments</title>
 	<para>Usage: <constant>kgdboc=[kms][[,]kbd][[,]serial_device][,baud]</constant></para>
@@ -284,7 +284,6 @@
 	</listitem>
 	</orderedlist>
 	</para>
-	</sect3>
 	<para>NOTE: Kgdboc does not support interrupting the target via the
 	gdb remote protocol. You must manually send a sysrq-g unless you
 	have a proxy that splits console output to a terminal program.
@@ -305,6 +304,7 @@
 	as well as on the initial connect, or to use a debugger proxy that
 	allows an unmodified gdb to do the debugging.
 	</para>
+	</sect3>
 	</sect2>
 	</sect1>
 	<sect1 id="kgdbwait">
@@ -350,12 +350,12 @@
 	</para>
 	</listitem>
 	</orderedlist>
+	</para>
 	<para>IMPORTANT NOTE: You cannot use kgdboc + kgdbcon on a tty that is an
 	active system console. An example of incorrect usage is <constant>console=ttyS0,115200 kgdboc=ttyS0 kgdbcon</constant>
 	</para>
 	<para>It is possible to use this option with kgdboc on a tty that is not a system console.
 	</para>
-	</para>
 	</sect1>
 	<sect1 id="kgdbreboot">
 	<title>Run time parameter: kgdbreboot</title>
diff --git a/Documentation/cgroups/unified-hierarchy.txt b/Documentation/cgroups/unified-hierarchy.txt
index 71daa35ec2d9..eb102fb72213 100644
--- a/Documentation/cgroups/unified-hierarchy.txt
+++ b/Documentation/cgroups/unified-hierarchy.txt
@@ -404,8 +404,8 @@ supported and the interface files "release_agent" and
 be understood as an underflow into the highest possible value, -2 or
 -10M etc. do not work, so it's not consistent.
 
-memory.low, memory.high, and memory.max will use the string
-"infinity" to indicate and set the highest possible value.
+memory.low, memory.high, and memory.max will use the string "max" to
+indicate and set the highest possible value.
 
 5. Planned Changes
 
diff --git a/Documentation/clk.txt b/Documentation/clk.txt
index 4ff84623d5e1..0e4f90aa1c13 100644
--- a/Documentation/clk.txt
+++ b/Documentation/clk.txt
@@ -73,6 +73,8 @@ the operations defined in clk.h:
 					unsigned long *parent_rate);
 	long		(*determine_rate)(struct clk_hw *hw,
 					unsigned long rate,
+					unsigned long min_rate,
+					unsigned long max_rate,
 					unsigned long *best_parent_rate,
 					struct clk_hw **best_parent_clk);
 	int		(*set_parent)(struct clk_hw *hw, u8 index);
diff --git a/Documentation/device-mapper/dm-crypt.txt b/Documentation/device-mapper/dm-crypt.txt
index c81839b52c4d..ad697781f9ac 100644
--- a/Documentation/device-mapper/dm-crypt.txt
+++ b/Documentation/device-mapper/dm-crypt.txt
@@ -51,7 +51,7 @@ Parameters: <cipher> <key> <iv_offset> <device path> \
 	Otherwise #opt_params is the number of following arguments.
 
 	Example of optional parameters section:
-		1 allow_discards
+		3 allow_discards same_cpu_crypt submit_from_crypt_cpus
 
 allow_discards
 	Block discard requests (a.k.a. TRIM) are passed through the crypt device.
@@ -63,6 +63,19 @@ allow_discards
 	used space etc.) if the discarded blocks can be located easily on the
 	device later.
 
+same_cpu_crypt
+	Perform encryption using the same CPU that IO was submitted on.
+	The default is to use an unbound workqueue so that encryption work
+	is automatically balanced between available CPUs.
+
+submit_from_crypt_cpus
+	Disable offloading writes to a separate thread after encryption.
+	There are some situations where offloading write bios from the
+	encryption threads to a single thread degrades performance
+	significantly. The default is to offload write bios to the same
+	thread because it benefits CFQ to have writes submitted using the
+	same context.
+
 Example scripts
 ===============
 LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
diff --git a/Documentation/devicetree/bindings/clock/exynos7-clock.txt b/Documentation/devicetree/bindings/clock/exynos7-clock.txt
index 6d3d5f80c1c3..6bf1e7493f61 100644
--- a/Documentation/devicetree/bindings/clock/exynos7-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos7-clock.txt
@@ -34,6 +34,8 @@ Required Properties for Clock Controller:
 	- "samsung,exynos7-clock-peris"
 	- "samsung,exynos7-clock-fsys0"
 	- "samsung,exynos7-clock-fsys1"
+	- "samsung,exynos7-clock-mscl"
+	- "samsung,exynos7-clock-aud"
 
 - reg: physical base address of the controller and the length of
 	memory mapped region.
@@ -53,6 +55,7 @@ Input clocks for top0 clock controller:
 	- dout_sclk_bus1_pll
 	- dout_sclk_cc_pll
 	- dout_sclk_mfc_pll
+	- dout_sclk_aud_pll
 
 Input clocks for top1 clock controller:
 	- fin_pll
@@ -76,6 +79,14 @@ Input clocks for peric1 clock controller:
 	- sclk_uart1
 	- sclk_uart2
 	- sclk_uart3
+	- sclk_spi0
+	- sclk_spi1
+	- sclk_spi2
+	- sclk_spi3
+	- sclk_spi4
+	- sclk_i2s1
+	- sclk_pcm1
+	- sclk_spdif
 
 Input clocks for peris clock controller:
 	- fin_pll
@@ -91,3 +102,7 @@ Input clocks for fsys1 clock controller:
 	- dout_aclk_fsys1_200
 	- dout_sclk_mmc0
 	- dout_sclk_mmc1
+
+Input clocks for aud clock controller:
+	- fin_pll
+	- fout_aud_pll
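
For orientation, a minimal aud clock controller node using the new compatible could look like the sketch below; the unit address is an illustrative assumption, not taken from this patch:

	clock_aud: clock-controller@114c0000 {
		compatible = "samsung,exynos7-clock-aud";
		reg = <0x114c0000 0x1000>;
		#clock-cells = <1>;
	};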
diff --git a/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt b/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt
index ded5d6212c84..c6620bc96703 100644
--- a/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt
+++ b/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt
@@ -1,4 +1,4 @@
-NVIDIA Tegra124 Clock And Reset Controller
+NVIDIA Tegra124 and Tegra132 Clock And Reset Controller
 
 This binding uses the common clock binding:
 Documentation/devicetree/bindings/clock/clock-bindings.txt
@@ -7,14 +7,16 @@ The CAR (Clock And Reset) Controller on Tegra is the HW module responsible
 for muxing and gating Tegra's clocks, and setting their rates.
 
 Required properties :
-- compatible : Should be "nvidia,tegra124-car"
+- compatible : Should be "nvidia,tegra124-car" or "nvidia,tegra132-car"
 - reg : Should contain CAR registers location and length
 - clocks : Should contain phandle and clock specifiers for two clocks:
   the 32 KHz "32k_in", and the board-specific oscillator "osc".
 - #clock-cells : Should be 1.
   In clock consumers, this cell represents the clock ID exposed by the
-  CAR. The assignments may be found in header file
-  <dt-bindings/clock/tegra124-car.h>.
+  CAR. The assignments may be found in the header files
+  <dt-bindings/clock/tegra124-car-common.h> (which covers IDs common
+  to Tegra124 and Tegra132) and <dt-bindings/clock/tegra124-car.h>
+  (for Tegra124-specific clocks).
 - #reset-cells : Should be 1.
   In clock consumers, this cell represents the bit number in the CAR's
   array of CLK_RST_CONTROLLER_RST_DEVICES_* registers.
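
For orientation, a minimal CAR node for the new "nvidia,tegra132-car" compatible might look like this sketch; the unit address and clock phandles are illustrative assumptions:

	tegra_car: clock@60006000 {
		compatible = "nvidia,tegra132-car";
		reg = <0x60006000 0x1000>;
		clocks = <&clk_32k>, <&osc>;
		#clock-cells = <1>;
		#reset-cells = <1>;
	};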
diff --git a/Documentation/devicetree/bindings/clock/qcom,lcc.txt b/Documentation/devicetree/bindings/clock/qcom,lcc.txt
new file mode 100644
index 000000000000..dd755be63a01
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,lcc.txt
@@ -0,0 +1,21 @@
+Qualcomm LPASS Clock & Reset Controller Binding
+------------------------------------------------
+
+Required properties :
+- compatible : shall contain only one of the following:
+
+			"qcom,lcc-msm8960"
+			"qcom,lcc-apq8064"
+			"qcom,lcc-ipq8064"
+
+- reg : shall contain base register location and length
+- #clock-cells : shall contain 1
+- #reset-cells : shall contain 1
+
+Example:
+	clock-controller@28000000 {
+		compatible = "qcom,lcc-ipq8064";
+		reg = <0x28000000 0x1000>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/clock/qoriq-clock.txt b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
index 266ff9d23229..df4a259a6898 100644
--- a/Documentation/devicetree/bindings/clock/qoriq-clock.txt
+++ b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
@@ -1,6 +1,6 @@
-* Clock Block on Freescale CoreNet Platforms
+* Clock Block on Freescale QorIQ Platforms
 
-Freescale CoreNet chips take primary clocking input from the external
+Freescale QorIQ chips take primary clocking input from the external
 SYSCLK signal. The SYSCLK input (frequency) is multiplied using
 multiple phase locked loops (PLL) to create a variety of frequencies
 which can then be passed to a variety of internal logic, including
@@ -29,6 +29,7 @@ Required properties:
 	* "fsl,t4240-clockgen"
 	* "fsl,b4420-clockgen"
 	* "fsl,b4860-clockgen"
+	* "fsl,ls1021a-clockgen"
 	Chassis clock strings include:
 	* "fsl,qoriq-clockgen-1.0": for chassis 1.0 clocks
 	* "fsl,qoriq-clockgen-2.0": for chassis 2.0 clocks
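
For orientation, a skeleton node for the new "fsl,ls1021a-clockgen" compatible might look like the following sketch; the unit address and ranges are illustrative assumptions, and the chassis 2.0 sub-nodes follow the rules already given in this document:

	clockgen: clocking@1ee1000 {
		compatible = "fsl,ls1021a-clockgen";
		reg = <0x1ee1000 0x10000>;
		#address-cells = <1>;
		#size-cells = <1>;
		ranges = <0x0 0x1ee1000 0x10000>;
		/* chassis 2.0 sysclk/PLL/mux sub-nodes go here */
	};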
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
index 2e18676bd4b5..0a80fa70ca26 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
@@ -11,6 +11,7 @@ Required Properties:
 
 - compatible: Must be one of the following
   - "renesas,r7s72100-mstp-clocks" for R7S72100 (RZ) MSTP gate clocks
+  - "renesas,r8a73a4-mstp-clocks" for R8A73A4 (R-Mobile APE6) MSTP gate clocks
   - "renesas,r8a7740-mstp-clocks" for R8A7740 (R-Mobile A1) MSTP gate clocks
   - "renesas,r8a7779-mstp-clocks" for R8A7779 (R-Car H1) MSTP gate clocks
   - "renesas,r8a7790-mstp-clocks" for R8A7790 (R-Car H2) MSTP gate clocks
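
For orientation, an R8A73A4 MSTP gate node with the new compatible could be sketched as below; the register address, parent clock, index and output name are illustrative assumptions, not taken from this patch:

	mstp2_clks: mstp2_clks@e6150138 {
		compatible = "renesas,r8a73a4-mstp-clocks",
			     "renesas,cpg-mstp-clocks";
		reg = <0 0xe6150138 0 4>;
		clocks = <&mp_clk>;
		#clock-cells = <1>;
		clock-indices = <4>;
		clock-output-names = "scifa0";
	};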
diff --git a/Documentation/devicetree/bindings/clock/renesas,r8a73a4-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,r8a73a4-cpg-clocks.txt
new file mode 100644
index 000000000000..ece92393e80d
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/renesas,r8a73a4-cpg-clocks.txt
@@ -0,0 +1,33 @@
+* Renesas R8A73A4 Clock Pulse Generator (CPG)
+
+The CPG generates core clocks for the R8A73A4 SoC. It includes five PLLs
+and several fixed ratio dividers.
+
+Required Properties:
+
+  - compatible: Must be "renesas,r8a73a4-cpg-clocks"
+
+  - reg: Base address and length of the memory resource used by the CPG
+
+  - clocks: Reference to the parent clocks ("extal1" and "extal2")
+
+  - #clock-cells: Must be 1
+
+  - clock-output-names: The names of the clocks. Supported clocks are "main",
+    "pll0", "pll1", "pll2", "pll2s", "pll2h", "z", "z2", "i", "m3", "b",
+    "m1", "m2", "zx", "zs", and "hp".
+
+
+Example
+-------
+
+	cpg_clocks: cpg_clocks@e6150000 {
+		compatible = "renesas,r8a73a4-cpg-clocks";
+		reg = <0 0xe6150000 0 0x10000>;
+		clocks = <&extal1_clk>, <&extal2_clk>;
+		#clock-cells = <1>;
+		clock-output-names = "main", "pll0", "pll1", "pll2",
+				     "pll2s", "pll2h", "z", "z2",
+				     "i", "m3", "b", "m1", "m2",
+				     "zx", "zs", "hp";
+	};
diff --git a/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt
index e6ad35b894f9..b02944fba9de 100644
--- a/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt
@@ -8,15 +8,18 @@ Required Properties:
 - compatible: Must be one of
   - "renesas,r8a7790-cpg-clocks" for the r8a7790 CPG
   - "renesas,r8a7791-cpg-clocks" for the r8a7791 CPG
+  - "renesas,r8a7793-cpg-clocks" for the r8a7793 CPG
   - "renesas,r8a7794-cpg-clocks" for the r8a7794 CPG
   - "renesas,rcar-gen2-cpg-clocks" for the generic R-Car Gen2 CPG
 
 - reg: Base address and length of the memory resource used by the CPG
 
-- clocks: Reference to the parent clock
+- clocks: References to the parent clocks: first to the EXTAL clock, second
+  to the USB_EXTAL clock
 - #clock-cells: Must be 1
 - clock-output-names: The names of the clocks. Supported clocks are "main",
-  "pll0", "pll1", "pll3", "lb", "qspi", "sdh", "sd0", "sd1" and "z"
+  "pll0", "pll1", "pll3", "lb", "qspi", "sdh", "sd0", "sd1", "z", "rcan", and
+  "adsp"
 
 
 Example
@@ -26,8 +29,9 @@ Example
 		compatible = "renesas,r8a7790-cpg-clocks",
 			     "renesas,rcar-gen2-cpg-clocks";
 		reg = <0 0xe6150000 0 0x1000>;
-		clocks = <&extal_clk>;
+		clocks = <&extal_clk &usb_extal_clk>;
 		#clock-cells = <1>;
 		clock-output-names = "main", "pll0", "pll1", "pll3",
-				     "lb", "qspi", "sdh", "sd0", "sd1", "z";
+				     "lb", "qspi", "sdh", "sd0", "sd1", "z",
+				     "rcan", "adsp";
 	};
diff --git a/Documentation/devicetree/bindings/clock/sunxi.txt b/Documentation/devicetree/bindings/clock/sunxi.txt
index 67b2b99f2b33..60b44285250d 100644
--- a/Documentation/devicetree/bindings/clock/sunxi.txt
+++ b/Documentation/devicetree/bindings/clock/sunxi.txt
@@ -26,7 +26,7 @@ Required properties:
 	"allwinner,sun5i-a10s-ahb-gates-clk" - for the AHB gates on A10s
 	"allwinner,sun7i-a20-ahb-gates-clk" - for the AHB gates on A20
 	"allwinner,sun6i-a31-ar100-clk" - for the AR100 on A31
-	"allwinner,sun6i-a31-ahb1-mux-clk" - for the AHB1 multiplexer on A31
+	"allwinner,sun6i-a31-ahb1-clk" - for the AHB1 clock on A31
 	"allwinner,sun6i-a31-ahb1-gates-clk" - for the AHB1 gates on A31
 	"allwinner,sun8i-a23-ahb1-gates-clk" - for the AHB1 gates on A23
 	"allwinner,sun9i-a80-ahb0-gates-clk" - for the AHB0 gates on A80
@@ -55,9 +55,11 @@ Required properties:
 	"allwinner,sun6i-a31-apb2-gates-clk" - for the APB2 gates on A31
 	"allwinner,sun8i-a23-apb2-gates-clk" - for the APB2 gates on A23
 	"allwinner,sun5i-a13-mbus-clk" - for the MBUS clock on A13
-	"allwinner,sun4i-a10-mmc-output-clk" - for the MMC output clock on A10
-	"allwinner,sun4i-a10-mmc-sample-clk" - for the MMC sample clock on A10
+	"allwinner,sun4i-a10-mmc-clk" - for the MMC clock
+	"allwinner,sun9i-a80-mmc-clk" - for mmc module clocks on A80
+	"allwinner,sun9i-a80-mmc-config-clk" - for mmc gates + resets on A80
 	"allwinner,sun4i-a10-mod0-clk" - for the module 0 family of clocks
+	"allwinner,sun9i-a80-mod0-clk" - for module 0 (storage) clocks on A80
 	"allwinner,sun8i-a23-mbus-clk" - for the MBUS clock on A23
 	"allwinner,sun7i-a20-out-clk" - for the external output clocks
 	"allwinner,sun7i-a20-gmac-clk" - for the GMAC clock module on A20/A31
@@ -73,7 +75,9 @@ Required properties for all clocks:
 - #clock-cells : from common clock binding; shall be set to 0 except for
   the following compatibles where it shall be set to 1:
   "allwinner,*-gates-clk", "allwinner,sun4i-pll5-clk",
-  "allwinner,sun4i-pll6-clk", "allwinner,sun6i-a31-pll6-clk"
+  "allwinner,sun4i-pll6-clk", "allwinner,sun6i-a31-pll6-clk",
+  "allwinner,*-usb-clk", "allwinner,*-mmc-clk",
+  "allwinner,*-mmc-config-clk"
 - clock-output-names : shall be the corresponding names of the outputs.
   If the clock module only has one output, the name shall be the
   module name.
@@ -81,6 +85,10 @@ Required properties for all clocks:
 And "allwinner,*-usb-clk" clocks also require:
 - reset-cells : shall be set to 1
 
+The "allwinner,sun9i-a80-mmc-config-clk" clock also requires:
+- #reset-cells : shall be set to 1
+- resets : shall be the reset control phandle for the mmc block.
+
 For "allwinner,sun7i-a20-gmac-clk", the parent clocks shall be fixed rate
 dummy clocks at 25 MHz and 125 MHz, respectively. See example.
 
@@ -95,6 +103,14 @@ For "allwinner,sun6i-a31-pll6-clk", there are 2 outputs. The first output
 is the normal PLL6 output, or "pll6". The second output is rate doubled
 PLL6, or "pll6x2".
 
+The "allwinner,*-mmc-clk" clocks have three different outputs: the
+main clock, with the ID 0, and the output and sample clocks, with the
+IDs 1 and 2, respectively.
+
+The "allwinner,sun9i-a80-mmc-config-clk" clock has one clock/reset output
+per mmc controller. The number of outputs is determined by the size of
+the address block, which is related to the overall mmc block.
+
 For example:
 
 osc24M: clk@01c20050 {
@@ -138,11 +154,11 @@ cpu: cpu@01c20054 {
 };
 
 mmc0_clk: clk@01c20088 {
-	#clock-cells = <0>;
-	compatible = "allwinner,sun4i-mod0-clk";
+	#clock-cells = <1>;
+	compatible = "allwinner,sun4i-a10-mmc-clk";
 	reg = <0x01c20088 0x4>;
 	clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-	clock-output-names = "mmc0";
+	clock-output-names = "mmc0", "mmc0_output", "mmc0_sample";
 };
 
 mii_phy_tx_clk: clk@2 {
@@ -170,3 +186,16 @@ gmac_clk: clk@01c20164 {
 	clocks = <&mii_phy_tx_clk>, <&gmac_int_tx_clk>;
 	clock-output-names = "gmac";
 };
+
+mmc_config_clk: clk@01c13000 {
+	compatible = "allwinner,sun9i-a80-mmc-config-clk";
+	reg = <0x01c13000 0x10>;
+	clocks = <&ahb0_gates 8>;
+	clock-names = "ahb";
+	resets = <&ahb0_resets 8>;
+	reset-names = "ahb";
+	#clock-cells = <1>;
+	#reset-cells = <1>;
+	clock-output-names = "mmc0_config", "mmc1_config",
+			     "mmc2_config", "mmc3_config";
+};
diff --git a/Documentation/devicetree/bindings/clock/ti,cdce706.txt b/Documentation/devicetree/bindings/clock/ti,cdce706.txt
new file mode 100644
index 000000000000..616836e7e1e2
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/ti,cdce706.txt
@@ -0,0 +1,42 @@
+Bindings for Texas Instruments CDCE706 programmable 3-PLL clock
+synthesizer/multiplier/divider.
+
+Reference: http://www.ti.com/lit/ds/symlink/cdce706.pdf
+
+I2C device node required properties:
+- compatible: shall be "ti,cdce706".
+- reg: i2c device address, shall be in range [0x68...0x6b].
+- #clock-cells: from common clock binding; shall be set to 1.
+- clocks: from common clock binding; list of parent clock
+  handles, shall be reference clock(s) connected to CLK_IN0
+  and CLK_IN1 pins.
+- clock-names: shall be clk_in0 and/or clk_in1. Use clk_in0
+  in case of crystal oscillator or differential signal input
+  configuration. Use clk_in0 and clk_in1 in case of independent
+  single-ended LVCMOS inputs configuration.
+
+Example:
+
+	clocks {
+		clk54: clk54 {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <54000000>;
+		};
+	};
+	...
+	i2c0: i2c-master@0d090000 {
+		...
+		cdce706: clock-synth@69 {
+			compatible = "ti,cdce706";
+			#clock-cells = <1>;
+			reg = <0x69>;
+			clocks = <&clk54>;
+			clock-names = "clk_in0";
+		};
+	};
+	...
+	simple-audio-card,codec {
+		...
+		clocks = <&cdce706 4>;
+	};
diff --git a/Documentation/devicetree/bindings/clock/ti/fapll.txt b/Documentation/devicetree/bindings/clock/ti/fapll.txt
new file mode 100644
index 000000000000..c19b3f253b8c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/ti/fapll.txt
@@ -0,0 +1,33 @@
+Binding for Texas Instruments FAPLL clock.
+
+Binding status: Unstable - ABI compatibility may be broken in the future
+
+This binding uses the common clock binding[1]. It assumes a
+register-mapped FAPLL with usually two selectable input clocks
+(reference clock and bypass clock), and one or more child
+synthesizers.
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be "ti,dm816-fapll-clock"
+- #clock-cells : from common clock binding; shall be set to 1, as in the
+  example below, since the synthesizer outputs are indexed.
+- clocks : link phandles of parent clocks (clk-ref and clk-bypass)
+- reg : address and length of the register set for controlling the FAPLL.
+
+Examples:
+	main_fapll: main_fapll {
+		#clock-cells = <1>;
+		compatible = "ti,dm816-fapll-clock";
+		reg = <0x400 0x40>;
+		clocks = <&sys_clkin_ck &sys_clkin_ck>;
+		clock-indices = <1>, <2>, <3>, <4>, <5>,
+				<6>, <7>;
+		clock-output-names = "main_pll_clk1",
+				     "main_pll_clk2",
+				     "main_pll_clk3",
+				     "main_pll_clk4",
+				     "main_pll_clk5",
+				     "main_pll_clk6",
+				     "main_pll_clk7";
+	};
diff --git a/Documentation/devicetree/bindings/dma/img-mdc-dma.txt b/Documentation/devicetree/bindings/dma/img-mdc-dma.txt
new file mode 100644
index 000000000000..28c1341db346
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/img-mdc-dma.txt
@@ -0,0 +1,57 @@
+* IMG Multi-threaded DMA Controller (MDC)
+
+Required properties:
+- compatible: Must be "img,pistachio-mdc-dma".
+- reg: Must contain the base address and length of the MDC registers.
+- interrupts: Must contain all the per-channel DMA interrupts.
+- clocks: Must contain an entry for each entry in clock-names.
+  See ../clock/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+  - sys: MDC system interface clock.
+- img,cr-periph: Must contain a phandle to the peripheral control syscon
+  node which contains the DMA request to channel mapping registers.
+- img,max-burst-multiplier: Must be the maximum supported burst size multiplier.
+  The maximum burst size is this value multiplied by the hardware-reported bus
+  width.
+- #dma-cells: Must be 3:
+  - The first cell is the peripheral's DMA request line.
+  - The second cell is a bitmap specifying to which channels the DMA request
+    line may be mapped (i.e. bit N set indicates channel N is usable).
+  - The third cell is the thread ID to be used by the channel.
+
+Optional properties:
+- dma-channels: Number of supported DMA channels, up to 32. If not specified
+  the number reported by the hardware is used.
+
+Example:
+
+mdc: dma-controller@18143000 {
+	compatible = "img,pistachio-mdc-dma";
+	reg = <0x18143000 0x1000>;
+	interrupts = <GIC_SHARED 27 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 28 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 29 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 30 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 31 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 32 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 33 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 34 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 35 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 36 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 37 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 38 IRQ_TYPE_LEVEL_HIGH>;
+	clocks = <&system_clk>;
+	clock-names = "sys";
+
+	img,max-burst-multiplier = <16>;
+	img,cr-periph = <&cr_periph>;
+
+	#dma-cells = <3>;
+};
+
+spi@18100f00 {
+	...
+	dmas = <&mdc 9 0xffffffff 0>, <&mdc 10 0xffffffff 0>;
+	dma-names = "tx", "rx";
+	...
+};
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
index f7e21b1c2a05..09daeef1ff22 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
@@ -5,9 +5,6 @@ controller instances named DMAC capable of serving multiple clients. Channels
 can be dedicated to specific clients or shared between a large number of
 clients.
 
-DMA clients are connected to the DMAC ports referenced by an 8-bit identifier
-called MID/RID.
-
 Each DMA client is connected to one dedicated port of the DMAC, identified by
 an 8-bit port number called the MID/RID. A DMA controller can thus serve up to
 256 clients in total. When the number of hardware channels is lower than the
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
index d58675ea1abf..c261598164a7 100644
--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -38,7 +38,7 @@ Example:
 		chan_allocation_order = <1>;
 		chan_priority = <1>;
 		block_size = <0xfff>;
-		data_width = <3 3 0 0>;
+		data_width = <3 3>;
 	};
 
 DMA clients connected to the Designware DMA controller must use the format
diff --git a/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt b/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt
new file mode 100644
index 000000000000..81f982ccca31
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt
@@ -0,0 +1,37 @@
+Broadcom iProc I2C controller
+
+Required properties:
+
+- compatible:
+    Must be "brcm,iproc-i2c"
+
+- reg:
+    Defines the base and range of the I/O address space that contains the
+    iProc I2C controller registers
+
+- interrupts:
+    Should contain the I2C interrupt
+
+- clock-frequency:
+    This is the I2C bus clock. Needs to be either 100000 or 400000
+
+- #address-cells:
+    Always 1 (for I2C addresses)
+
+- #size-cells:
+    Always 0
+
+Example:
+	i2c0: i2c@18008000 {
+		compatible = "brcm,iproc-i2c";
+		reg = <0x18008000 0x100>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+		clock-frequency = <100000>;
+
+		codec: wm8750@1a {
+			compatible = "wlf,wm8750";
+			reg = <0x1a>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
index 34a3fb6f8488..cf53d5fba20a 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
@@ -16,6 +16,9 @@ Required Properties:
 Optional Properties:
 
   - reset-gpios: Reference to the GPIO connected to the reset input.
+  - i2c-mux-idle-disconnect: Boolean; if defined, forces the mux to disconnect
+    all children in the idle state. This is necessary, for example, if there
+    are several multiplexers on the bus and the devices behind them use the
+    same I2C addresses.
 
 
 Example:
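
For illustration, the new flag sits directly on the mux node, as in this sketch; the PCA9548 at address 0x74 is an assumed example device, not taken from this patch:

	i2cmux@74 {
		compatible = "nxp,pca9548";
		reg = <0x74>;
		#address-cells = <1>;
		#size-cells = <0>;
		i2c-mux-idle-disconnect;
	};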
diff --git a/Documentation/devicetree/bindings/i2c/i2c-ocores.txt b/Documentation/devicetree/bindings/i2c/i2c-ocores.txt
index 1637c298a1b3..17bef9a34e50 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-ocores.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-ocores.txt
@@ -4,24 +4,60 @@ Required properties:
 - compatible      : "opencores,i2c-ocores" or "aeroflexgaisler,i2cmst"
 - reg             : bus address start and address range size of device
 - interrupts      : interrupt number
-- clock-frequency : frequency of bus clock in Hz
+- clocks          : handle to the controller clock; see the note below.
+                    Mutually exclusive with opencores,ip-clock-frequency
+- opencores,ip-clock-frequency : frequency of the controller clock in Hz;
+                    see the note below. Mutually exclusive with clocks
 - #address-cells  : should be <1>
 - #size-cells     : should be <0>
 
 Optional properties:
+- clock-frequency : frequency of bus clock in Hz; see the note below.
+                    Defaults to 100 kHz when the property is not specified
 - reg-shift       : device register offsets are shifted by this value
 - reg-io-width    : io register width in bytes (1, 2 or 4)
 - regstep         : deprecated, use reg-shift above
 
-Example:
+Note
+The clock-frequency property is meant to control the bus frequency for i2c bus
+drivers, but it was incorrectly used to specify the i2c controller input clock
+frequency. So the following rules are set to fix this situation:
+- if clock-frequency is present and neither opencores,ip-clock-frequency nor
+  clocks are, then clock-frequency specifies the i2c controller clock
+  frequency. This is to keep backwards compatibility with setups using old
+  DTBs. The i2c bus frequency is fixed at 100 kHz.
+- if clocks is present, it specifies the i2c controller clock. The
+  clock-frequency property specifies the i2c bus frequency.
+- if opencores,ip-clock-frequency is present, it specifies the i2c controller
+  clock frequency. The clock-frequency property specifies the i2c bus
+  frequency.
 
+Examples:
+
+	i2c0: ocores@a0000000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "opencores,i2c-ocores";
+		reg = <0xa0000000 0x8>;
+		interrupts = <10>;
+		opencores,ip-clock-frequency = <20000000>;
+
+		reg-shift = <0>;	/* 8 bit registers */
+		reg-io-width = <1>;	/* 8 bit read/write */
+
+		dummy@60 {
+			compatible = "dummy";
+			reg = <0x60>;
+		};
+	};
+or
 	i2c0: ocores@a0000000 {
 		#address-cells = <1>;
 		#size-cells = <0>;
 		compatible = "opencores,i2c-ocores";
 		reg = <0xa0000000 0x8>;
 		interrupts = <10>;
-		clock-frequency = <20000000>;
+		clocks = <&osc>;
+		clock-frequency = <400000>; /* i2c bus frequency 400 kHz */
 
 		reg-shift = <0>;	/* 8 bit registers */
 		reg-io-width = <1>;	/* 8 bit read/write */
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
index dde6c22ce91a..f0d71bc52e64 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
@@ -21,6 +21,17 @@ Required on RK3066, RK3188 :
 Optional properties :
 
  - clock-frequency : SCL frequency to use (in Hz). If omitted, 100kHz is used.
+ - i2c-scl-rising-time-ns : Number of nanoseconds the SCL signal takes to rise
+   (t(r) in the I2C specification). If not specified, this is assumed to be
+   the maximum the specification allows (1000 ns for Standard-mode,
+   300 ns for Fast-mode), which might cause slightly slower communication.
+ - i2c-scl-falling-time-ns : Number of nanoseconds the SCL signal takes to fall
+   (t(f) in the I2C specification). If not specified, this is assumed to
+   be the maximum the specification allows (300 ns), which might cause
+   slightly slower communication.
+ - i2c-sda-falling-time-ns : Number of nanoseconds the SDA signal takes to fall
+   (t(f) in the I2C specification). If not specified, the SCL value is used,
+   since they are the same in nearly all cases.
 
 Example:
 
@@ -39,4 +50,7 @@ i2c0: i2c@2002d000 {
 
 	clock-names = "i2c";
 	clocks = <&cru PCLK_I2C0>;
+
+	i2c-scl-rising-time-ns = <800>;
+	i2c-scl-falling-time-ns = <100>;
 };
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index 4dcd88d5f7ca..aaa8325004d2 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -61,9 +61,8 @@ fsl,sgtl5000 SGTL5000: Ultra Low-Power Audio Codec
 gmt,g751		G751: Digital Temperature Sensor and Thermal Watchdog with Two-Wire Interface
 infineon,slb9635tt	Infineon SLB9635 (Soft-) I2C TPM (old protocol, max 100khz)
 infineon,slb9645tt	Infineon SLB9645 I2C TPM (new protocol, max 400khz)
-isl,isl12057		Intersil ISL12057 I2C RTC Chip
-isil,isl29028		(deprecated, use isl)
-isl,isl29028		Intersil ISL29028 Ambient Light and Proximity Sensor
+isil,isl12057		Intersil ISL12057 I2C RTC Chip
+isil,isl29028		Intersil ISL29028 Ambient Light and Proximity Sensor
 maxim,ds1050		5 Bit Programmable, Pulse-Width Modulator
 maxim,max1237		Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
 maxim,max6625		9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
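
For illustration, a client node for the renamed RTC entry now uses the "isil" vendor prefix; the bus address below is an assumed example:

	rtc@68 {
		compatible = "isil,isl12057";
		reg = <0x68>;
	};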
diff --git a/Documentation/devicetree/bindings/mfd/da9063.txt b/Documentation/devicetree/bindings/mfd/da9063.txt
new file mode 100644
index 000000000000..42c6fa6f1c9a
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/da9063.txt
@@ -0,0 +1,93 @@
+* Dialog DA9063 Power Management Integrated Circuit (PMIC)
+
+DA9063 consists of a large and varied group of sub-devices (I2C only):
+
+Device			Supply Names	Description
+------			------------	-----------
+da9063-regulator	:		: LDOs & BUCKs
+da9063-rtc		:		: Real-Time Clock
+da9063-watchdog		:		: Watchdog
+
+======
+
+Required properties:
+
+- compatible : Should be "dlg,da9063"
+- reg : Specifies the I2C slave address (this defaults to 0x58 but it can be
+  modified to match the chip's OTP settings).
+- interrupt-parent : Specifies the reference to the interrupt controller for
+  the DA9063.
+- interrupts : IRQ line information.
+- interrupt-controller
+
+Sub-nodes:
+
+- regulators : This node defines the settings for the LDOs and BUCKs. The
+  DA9063 regulators are bound using their names listed below:
+
+    bcore1	: BUCK CORE1
+    bcore2	: BUCK CORE2
+    bpro	: BUCK PRO
+    bmem	: BUCK MEM
+    bio		: BUCK IO
+    bperi	: BUCK PERI
+    ldo1	: LDO_1
+    ldo2	: LDO_2
+    ldo3	: LDO_3
+    ldo4	: LDO_4
+    ldo5	: LDO_5
+    ldo6	: LDO_6
+    ldo7	: LDO_7
+    ldo8	: LDO_8
+    ldo9	: LDO_9
+    ldo10	: LDO_10
+    ldo11	: LDO_11
+
+  The component follows the standard regulator framework and the binding
+  details of the individual regulator devices can be found in:
+  Documentation/devicetree/bindings/regulator/regulator.txt
+
+- rtc : This node defines settings for the Real-Time Clock associated with
+  the DA9063. There are currently no entries in this binding, however
+  compatible = "dlg,da9063-rtc" should be added if a node is created.
+
+- watchdog : This node defines settings for the Watchdog timer associated
+  with the DA9063. There are currently no entries in this binding, however
+  compatible = "dlg,da9063-watchdog" should be added if a node is created.
+
+
+Example:
+
+	pmic0: da9063@58 {
+		compatible = "dlg,da9063";
+		reg = <0x58>;
+		interrupt-parent = <&gpio6>;
+		interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-controller;
+
+		rtc {
+			compatible = "dlg,da9063-rtc";
+		};
+
+		wdt {
+			compatible = "dlg,da9063-watchdog";
+		};
+
+		regulators {
+			DA9063_BCORE1: bcore1 {
+				regulator-name = "BCORE1";
+				regulator-min-microvolt = <300000>;
+				regulator-max-microvolt = <1570000>;
+				regulator-min-microamp = <500000>;
+				regulator-max-microamp = <2000000>;
+				regulator-boot-on;
+			};
+			DA9063_LDO11: ldo11 {
+				regulator-name = "LDO_11";
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <3600000>;
+				regulator-boot-on;
+			};
+		};
+	};
+
diff --git a/Documentation/devicetree/bindings/mfd/qcom-rpm.txt b/Documentation/devicetree/bindings/mfd/qcom-rpm.txt
new file mode 100644
index 000000000000..85e31980017a
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/qcom-rpm.txt
@@ -0,0 +1,70 @@
+Qualcomm Resource Power Manager (RPM)
+
+This driver is used to interface with the Resource Power Manager (RPM) found in
+various Qualcomm platforms. The RPM allows each component in the system to vote
+for the state of system resources, such as clocks, regulators and bus
+frequencies.
+
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: must be one of:
+		    "qcom,rpm-apq8064"
+		    "qcom,rpm-msm8660"
+		    "qcom,rpm-msm8960"
+
+- reg:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: base address and size of the RPM's message ram
+
+- interrupts:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: three entries specifying the RPM's:
+		    1. acknowledgement interrupt
+		    2. error interrupt
+		    3. wakeup interrupt
+
+- interrupt-names:
+	Usage: required
+	Value type: <string-array>
+	Definition: must be the three strings "ack", "err" and "wakeup", in order
+
+- #address-cells:
+	Usage: required
+	Value type: <u32>
+	Definition: must be 1
+
+- #size-cells:
+	Usage: required
+	Value type: <u32>
+	Definition: must be 0
+
+- qcom,ipc:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: three entries specifying the outgoing ipc bit used for
+		    signaling the RPM:
+		    - phandle to a syscon node representing the apcs registers
+		    - u32 representing offset to the register within the syscon
+		    - u32 representing the ipc bit within the register
+
+
+= EXAMPLE
+
+	#include <dt-bindings/mfd/qcom-rpm.h>
+
+	rpm@108000 {
+		compatible = "qcom,rpm-msm8960";
+		reg = <0x108000 0x1000>;
+		qcom,ipc = <&apcs 0x8 2>;
+
+		interrupts = <0 19 0>, <0 21 0>, <0 22 0>;
+		interrupt-names = "ack", "err", "wakeup";
+
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
+
diff --git a/Documentation/devicetree/bindings/mips/cavium/cib.txt b/Documentation/devicetree/bindings/mips/cavium/cib.txt
new file mode 100644
index 000000000000..f39a1aa2852b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/cavium/cib.txt
@@ -0,0 +1,43 @@
+* Cavium Interrupt Bus widget
+
+Properties:
+- compatible: "cavium,octeon-7130-cib"
+
+  Compatibility with cn70XX SoCs.
+
+- interrupt-controller: This is an interrupt controller.
+
+- reg: Two elements consisting of the addresses of the RAW and EN
+  registers of the CIB block
+
+- cavium,max-bits: The index (zero-based) of the highest numbered bit
+  in the CIB block.
+
+- interrupt-parent: Always the CIU on the SoC.
+
+- interrupts: The CIU line to which the CIB block is connected.
+
+- #interrupt-cells: Must be <2>. The first cell is the bit within the
+  CIB. The second cell specifies the triggering semantics of the
+  line.
+
+Example:
+
+	interrupt-controller@107000000e000 {
+		compatible = "cavium,octeon-7130-cib";
+		reg = <0x10700 0x0000e000 0x0 0x8>, /* RAW */
+		      <0x10700 0x0000e100 0x0 0x8>; /* EN */
+		cavium,max-bits = <23>;
+
+		interrupt-controller;
+		interrupt-parent = <&ciu>;
+		interrupts = <1 24>;
+		/* Interrupts are specified by two parts:
+		 * 1) Bit number in the CIB* registers
+		 * 2) Triggering (1 - edge rising
+		 *		  2 - edge falling
+		 *		  4 - level active high
+		 *		  8 - level active low)
+		 */
+		#interrupt-cells = <2>;
+	};
diff --git a/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt b/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
index 91b3a3467150..4bf41d833804 100644
--- a/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
@@ -10,8 +10,8 @@ Absolute maximum transfer rate is 200MB/s
 Required properties:
  - compatible : "allwinner,sun4i-a10-mmc" or "allwinner,sun5i-a13-mmc"
  - reg : mmc controller base registers
- - clocks : a list with 2 phandle + clock specifier pairs
- - clock-names : must contain "ahb" and "mmc"
+ - clocks : a list with 4 phandle + clock specifier pairs
+ - clock-names : must contain "ahb", "mmc", "output" and "sample"
  - interrupts : mmc controller interrupt
 
 Optional properties:
@@ -25,8 +25,8 @@ Examples:
 	mmc0: mmc@01c0f000 {
 		compatible = "allwinner,sun5i-a13-mmc";
 		reg = <0x01c0f000 0x1000>;
-		clocks = <&ahb_gates 8>, <&mmc0_clk>;
-		clock-names = "ahb", "mod";
+		clocks = <&ahb_gates 8>, <&mmc0_clk>, <&mmc0_output_clk>, <&mmc0_sample_clk>;
+		clock-names = "ahb", "mod", "output", "sample";
 		interrupts = <0 32 4>;
 		status = "disabled";
 	};
diff --git a/Documentation/devicetree/bindings/mtd/atmel-nand.txt b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
index 1fe6dde98499..7d4c8eb775a5 100644
--- a/Documentation/devicetree/bindings/mtd/atmel-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
@@ -1,7 +1,7 @@
 Atmel NAND flash
 
 Required properties:
-- compatible : "atmel,at91rm9200-nand".
+- compatible : should be "atmel,at91rm9200-nand" or "atmel,sama5d4-nand".
 - reg : should specify localbus address and size used for the chip,
   and hardware ECC controller if available.
   If the hardware ECC is PMECC, it should contain address and size for
diff --git a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt b/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
index 823d13412195..4461dc71cb10 100644
--- a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
+++ b/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
@@ -1,7 +1,7 @@
 * Freescale Quad Serial Peripheral Interface (QuadSPI)
 
 Required properties:
-  - compatible : Should be "fsl,vf610-qspi"
+  - compatible : Should be "fsl,vf610-qspi" or "fsl,imx6sx-qspi"
   - reg : the first contains the register location and length,
           the second contains the memory mapping address and length
   - reg-names: Should contain the reg names "QuadSPI" and "QuadSPI-memory"
diff --git a/Documentation/devicetree/bindings/mtd/gpmi-nand.txt b/Documentation/devicetree/bindings/mtd/gpmi-nand.txt
index a011fdf61dbf..d02acaff3c35 100644
--- a/Documentation/devicetree/bindings/mtd/gpmi-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmi-nand.txt
@@ -1,7 +1,7 @@
 * Freescale General-Purpose Media Interface (GPMI)
 
 The GPMI nand controller provides an interface to control the
-NAND flash chips. We support only one NAND chip now.
+NAND flash chips.
 
 Required properties:
   - compatible : should be "fsl,<chip>-gpmi-nand"
diff --git a/Documentation/devicetree/bindings/mtd/hisi504-nand.txt b/Documentation/devicetree/bindings/mtd/hisi504-nand.txt
new file mode 100644
index 000000000000..2e35f0662912
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/hisi504-nand.txt
@@ -0,0 +1,47 @@
+HiSilicon HiP04 SoC NAND controller DT binding
+
+Required properties:
+
+- compatible: Should be "hisilicon,504-nfc".
+- reg: The first entry contains the base physical address and size of
+  the NAND controller's registers. The second contains the base
+  physical address and size of the NAND controller's buffer.
+- interrupts: Interrupt number for the NFC.
+- nand-bus-width: See nand.txt.
+- nand-ecc-mode: Supported ECC modes are "none" and "hw".
+- #address-cells: Partition address, should be set to 1.
+- #size-cells: Partition size, should be set to 1.
+
+Optional properties:
+
+- nand-ecc-strength: Number of bits to correct per ECC step.
+- nand-ecc-step-size: Number of data bytes covered by a single ECC step.
+
+The following ECC strength and step size are currently supported:
+
+ - nand-ecc-strength = <16>, nand-ecc-step-size = <1024>
+
+The flash chip may optionally contain additional sub-nodes describing
+partitions of the address space. See partition.txt for more detail.
+
+Example:
+
+	nand: nand@4020000 {
+		compatible = "hisilicon,504-nfc";
+		reg = <0x4020000 0x10000>, <0x5000000 0x1000>;
+		interrupts = <0 379 4>;
+		nand-bus-width = <8>;
+		nand-ecc-mode = "hw";
+		nand-ecc-strength = <16>;
+		nand-ecc-step-size = <1024>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		partition@0 {
+			label = "nand_text";
+			reg = <0x00000000 0x00400000>;
+		};
+
+		...
+
+	};
diff --git a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
index 6b9f680cb579..4a0a48bf4ecb 100644
--- a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
+++ b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
@@ -36,6 +36,11 @@ are defined:
  - vendor-id : Contains the flash chip's vendor id (1 byte).
  - device-id : Contains the flash chip's device id (1 byte).
 
+For ROM compatible devices (and ROM fallback from cfi-flash), the following
+additional (optional) property is defined:
+
+ - erase-size : The chip's physical erase block size in bytes.
+
 The device tree may optionally contain sub-nodes describing partitions of the
 address space. See partition.txt for more detail.
 
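For illustration, a ROM-compatible flash node carrying the new property could be sketched as follows; the address, bank width and sizes are illustrative assumptions:

	flash@ff000000 {
		compatible = "mtd-rom";
		reg = <0xff000000 0x1000000>;
		bank-width = <2>;
		erase-size = <0x10000>;
	};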
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
index 33df3932168e..8db32384a486 100644
--- a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
+++ b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
@@ -27,6 +27,8 @@ property is used.
 - amd,serdes-cdr-rate: CDR rate speed selection
 - amd,serdes-pq-skew: PQ (data sampling) skew
 - amd,serdes-tx-amp: TX amplitude boost
+- amd,serdes-dfe-tap-config: DFE taps available to run
+- amd,serdes-dfe-tap-enable: DFE taps to enable
 
 Example:
 	xgbe_phy@e1240800 {
@@ -41,4 +43,6 @@ Example:
 		amd,serdes-cdr-rate = <2>, <2>, <7>;
 		amd,serdes-pq-skew = <10>, <10>, <30>;
 		amd,serdes-tx-amp = <15>, <15>, <10>;
+		amd,serdes-dfe-tap-config = <3>, <3>, <1>;
+		amd,serdes-dfe-tap-enable = <0>, <0>, <127>;
 	};
diff --git a/Documentation/devicetree/bindings/pwm/img-pwm.txt b/Documentation/devicetree/bindings/pwm/img-pwm.txt
new file mode 100644
index 000000000000..fade5f26fcac
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/img-pwm.txt
@@ -0,0 +1,24 @@
1*Imagination Technologies PWM DAC driver
2
3Required properties:
4 - compatible: Should be "img,pistachio-pwm"
5 - reg: Should contain physical base address and length of pwm registers.
6 - clocks: Must contain an entry for each entry in clock-names.
7 See ../clock/clock-bindings.txt for details.
8 - clock-names: Must include the following entries.
9 - pwm: PWM operating clock.
10 - sys: PWM system interface clock.
11 - #pwm-cells: Should be 2. See pwm.txt in this directory for the
12 description of the cells format.
13 - img,cr-periph: Must contain a phandle to the peripheral control
14 syscon node which contains PWM control registers.
15
16Example:
17 pwm: pwm@18101300 {
18 compatible = "img,pistachio-pwm";
19 reg = <0x18101300 0x100>;
20 clocks = <&pwm_clk>, <&system_clk>;
21 clock-names = "pwm", "sys";
22 #pwm-cells = <2>;
23 img,cr-periph = <&cr_periph>;
24 };
diff --git a/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt b/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt
new file mode 100644
index 000000000000..ae0273e19506
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt
@@ -0,0 +1,20 @@
1Allwinner sun4i and sun7i SoC PWM controller
2
3Required properties:
4 - compatible: should be one of:
5 - "allwinner,sun4i-a10-pwm"
6 - "allwinner,sun7i-a20-pwm"
7 - reg: physical base address and length of the controller's registers
8 - #pwm-cells: should be 3. See pwm.txt in this directory for a description of
9 the cells format.
10 - clocks: From common clock binding, handle to the parent clock.
11
12Example:
13
14 pwm: pwm@01c20e00 {
15 compatible = "allwinner,sun7i-a20-pwm";
16 reg = <0x01c20e00 0xc>;
17 clocks = <&osc24M>;
18 #pwm-cells = <3>;
19 status = "disabled";
20 };
diff --git a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt
index ae738f562acc..695150a4136b 100644
--- a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt
@@ -12,6 +12,7 @@
12 "samsung,exynos5420-tmu-ext-triminfo" for TMU channels 2, 3 and 4 12 "samsung,exynos5420-tmu-ext-triminfo" for TMU channels 2, 3 and 4
13 Exynos5420 (Must pass triminfo base and triminfo clock) 13 Exynos5420 (Must pass triminfo base and triminfo clock)
14 "samsung,exynos5440-tmu" 14 "samsung,exynos5440-tmu"
15 "samsung,exynos7-tmu"
15- interrupt-parent : The phandle for the interrupt controller 16- interrupt-parent : The phandle for the interrupt controller
16- reg : Address range of the thermal registers. For SoCs which have multiple 17- reg : Address range of the thermal registers. For SoCs which have multiple
17 instances of TMU and some registers are shared across all TMUs like 18 instances of TMU and some registers are shared across all TMUs like
@@ -32,13 +33,28 @@
32- clocks : The main clocks for TMU device 33- clocks : The main clocks for TMU device
33 -- 1. operational clock for TMU channel 34 -- 1. operational clock for TMU channel
34 -- 2. optional clock to access the shared registers of TMU channel 35 -- 2. optional clock to access the shared registers of TMU channel
36 -- 3. optional special clock for functional operation
35- clock-names : Thermal system clock name 37- clock-names : Thermal system clock name
36 -- "tmu_apbif" operational clock for current TMU channel 38 -- "tmu_apbif" operational clock for current TMU channel
37 -- "tmu_triminfo_apbif" clock to access the shared triminfo register 39 -- "tmu_triminfo_apbif" clock to access the shared triminfo register
38 for current TMU channel 40 for current TMU channel
41 -- "tmu_sclk" clock for functional operation of the current TMU
42 channel
39- vtmu-supply: This entry is optional and provides the regulator node supplying 43- vtmu-supply: This entry is optional and provides the regulator node supplying
40 voltage to TMU. If needed this entry can be placed inside 44 voltage to TMU. If needed this entry can be placed inside
41 board/platform specific dts file. 45 board/platform specific dts file.
46The following properties are mandatory (depending on the SoC):
47- samsung,tmu_gain: Gain value for internal TMU operation.
48- samsung,tmu_reference_voltage: Value of TMU IP block's reference voltage
49- samsung,tmu_noise_cancel_mode: Mode for noise cancellation
50- samsung,tmu_efuse_value: Default level of temperature; it is needed when
51 the factory fusing produced a wrong value
52- samsung,tmu_min_efuse_value: Minimum temperature fused value
53- samsung,tmu_max_efuse_value: Maximum temperature fused value
54- samsung,tmu_first_point_trim: First point trimming value
55- samsung,tmu_second_point_trim: Second point trimming value
56- samsung,tmu_default_temp_offset: Default temperature offset
57- samsung,tmu_cal_type: Calibration type
42 58
43Example 1): 59Example 1):
44 60
@@ -51,6 +67,7 @@ Example 1):
51 clock-names = "tmu_apbif"; 67 clock-names = "tmu_apbif";
52 status = "disabled"; 68 status = "disabled";
53 vtmu-supply = <&tmu_regulator_node>; 69 vtmu-supply = <&tmu_regulator_node>;
70 #include "exynos4412-tmu-sensor-conf.dtsi"
54 }; 71 };
55 72
56Example 2): 73Example 2):
@@ -61,6 +78,7 @@ Example 2):
61 interrupts = <0 58 0>; 78 interrupts = <0 58 0>;
62 clocks = <&clock 21>; 79 clocks = <&clock 21>;
63 clock-names = "tmu_apbif"; 80 clock-names = "tmu_apbif";
81 #include "exynos5440-tmu-sensor-conf.dtsi"
64 }; 82 };
65 83
66Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register") 84Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
@@ -70,6 +88,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
70 interrupts = <0 184 0>; 88 interrupts = <0 184 0>;
71 clocks = <&clock 318>, <&clock 318>; 89 clocks = <&clock 318>, <&clock 318>;
72 clock-names = "tmu_apbif", "tmu_triminfo_apbif"; 90 clock-names = "tmu_apbif", "tmu_triminfo_apbif";
91 #include "exynos4412-tmu-sensor-conf.dtsi"
73 }; 92 };
74 93
75 tmu_cpu3: tmu@1006c000 { 94 tmu_cpu3: tmu@1006c000 {
@@ -78,6 +97,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
78 interrupts = <0 185 0>; 97 interrupts = <0 185 0>;
79 clocks = <&clock 318>, <&clock 319>; 98 clocks = <&clock 318>, <&clock 319>;
80 clock-names = "tmu_apbif", "tmu_triminfo_apbif"; 99 clock-names = "tmu_apbif", "tmu_triminfo_apbif";
100 #include "exynos4412-tmu-sensor-conf.dtsi"
81 }; 101 };
82 102
83 tmu_gpu: tmu@100a0000 { 103 tmu_gpu: tmu@100a0000 {
@@ -86,6 +106,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
86 interrupts = <0 215 0>; 106 interrupts = <0 215 0>;
87 clocks = <&clock 319>, <&clock 318>; 107 clocks = <&clock 319>, <&clock 318>;
88 clock-names = "tmu_apbif", "tmu_triminfo_apbif"; 108 clock-names = "tmu_apbif", "tmu_triminfo_apbif";
109 #include "exynos4412-tmu-sensor-conf.dtsi"
89 }; 110 };
90 111
91Note: For multi-instance tmu each instance should have an alias correctly 112Note: For multi-instance tmu each instance should have an alias correctly
diff --git a/Documentation/devicetree/bindings/thermal/thermal.txt b/Documentation/devicetree/bindings/thermal/thermal.txt
index f5db6b72a36f..29fe0bfae38e 100644
--- a/Documentation/devicetree/bindings/thermal/thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/thermal.txt
@@ -251,24 +251,24 @@ ocp {
251}; 251};
252 252
253thermal-zones { 253thermal-zones {
254 cpu-thermal: cpu-thermal { 254 cpu_thermal: cpu-thermal {
255 polling-delay-passive = <250>; /* milliseconds */ 255 polling-delay-passive = <250>; /* milliseconds */
256 polling-delay = <1000>; /* milliseconds */ 256 polling-delay = <1000>; /* milliseconds */
257 257
258 thermal-sensors = <&bandgap0>; 258 thermal-sensors = <&bandgap0>;
259 259
260 trips { 260 trips {
261 cpu-alert0: cpu-alert { 261 cpu_alert0: cpu-alert0 {
262 temperature = <90000>; /* millicelsius */ 262 temperature = <90000>; /* millicelsius */
263 hysteresis = <2000>; /* millicelsius */ 263 hysteresis = <2000>; /* millicelsius */
264 type = "active"; 264 type = "active";
265 }; 265 };
266 cpu-alert1: cpu-alert { 266 cpu_alert1: cpu-alert1 {
267 temperature = <100000>; /* millicelsius */ 267 temperature = <100000>; /* millicelsius */
268 hysteresis = <2000>; /* millicelsius */ 268 hysteresis = <2000>; /* millicelsius */
269 type = "passive"; 269 type = "passive";
270 }; 270 };
271 cpu-crit: cpu-crit { 271 cpu_crit: cpu-crit {
272 temperature = <125000>; /* millicelsius */ 272 temperature = <125000>; /* millicelsius */
273 hysteresis = <2000>; /* millicelsius */ 273 hysteresis = <2000>; /* millicelsius */
274 type = "critical"; 274 type = "critical";
@@ -277,17 +277,17 @@ thermal-zones {
277 277
278 cooling-maps { 278 cooling-maps {
279 map0 { 279 map0 {
280 trip = <&cpu-alert0>; 280 trip = <&cpu_alert0>;
281 cooling-device = <&fan0 THERMAL_NO_LIMITS 4>; 281 cooling-device = <&fan0 THERMAL_NO_LIMIT 4>;
282 }; 282 };
283 map1 { 283 map1 {
284 trip = <&cpu-alert1>; 284 trip = <&cpu_alert1>;
285 cooling-device = <&fan0 5 THERMAL_NO_LIMITS>; 285 cooling-device = <&fan0 5 THERMAL_NO_LIMIT>;
286 }; 286 };
287 map2 { 287 map2 {
288 trip = <&cpu-alert1>; 288 trip = <&cpu_alert1>;
289 cooling-device = 289 cooling-device =
290 <&cpu0 THERMAL_NO_LIMITS THERMAL_NO_LIMITS>; 290 <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
291 }; 291 };
292 }; 292 };
293 }; 293 };
@@ -298,13 +298,13 @@ used to monitor the zone 'cpu-thermal' using its sole sensor. A fan
298device (fan0) is controlled via I2C bus 1, at address 0x48, and has ten 298device (fan0) is controlled via I2C bus 1, at address 0x48, and has ten
299different cooling states 0-9. It is used to remove the heat out of 299different cooling states 0-9. It is used to remove the heat out of
300the thermal zone 'cpu-thermal' using its cooling states 300the thermal zone 'cpu-thermal' using its cooling states
301from its minimum to 4, when it reaches trip point 'cpu-alert0' 301from its minimum to 4, when it reaches trip point 'cpu_alert0'
302at 90C, as an example of active cooling. The same cooling device is used at 302at 90C, as an example of active cooling. The same cooling device is used at
303'cpu-alert1', but from 5 to its maximum state. The cpu@0 device is also 303'cpu_alert1', but from 5 to its maximum state. The cpu@0 device is also
304linked to the same thermal zone, 'cpu-thermal', as a passive cooling device, 304linked to the same thermal zone, 'cpu-thermal', as a passive cooling device,
305using all its cooling states at trip point 'cpu-alert1', 305using all its cooling states at trip point 'cpu_alert1',
306which is a trip point at 100C. On the thermal zone 'cpu-thermal', at the 306which is a trip point at 100C. On the thermal zone 'cpu-thermal', at the
307temperature of 125C, represented by the trip point 'cpu-crit', the silicon 307temperature of 125C, represented by the trip point 'cpu_crit', the silicon
308is not reliable anymore. 308is not reliable anymore.
309 309
310(b) - IC with several internal sensors 310(b) - IC with several internal sensors
@@ -329,7 +329,7 @@ ocp {
329}; 329};
330 330
331thermal-zones { 331thermal-zones {
332 cpu-thermal: cpu-thermal { 332 cpu_thermal: cpu-thermal {
333 polling-delay-passive = <250>; /* milliseconds */ 333 polling-delay-passive = <250>; /* milliseconds */
334 polling-delay = <1000>; /* milliseconds */ 334 polling-delay = <1000>; /* milliseconds */
335 335
@@ -338,12 +338,12 @@ thermal-zones {
338 338
339 trips { 339 trips {
340 /* each zone within the SoC may have its own trips */ 340 /* each zone within the SoC may have its own trips */
341 cpu-alert: cpu-alert { 341 cpu_alert: cpu-alert {
342 temperature = <100000>; /* millicelsius */ 342 temperature = <100000>; /* millicelsius */
343 hysteresis = <2000>; /* millicelsius */ 343 hysteresis = <2000>; /* millicelsius */
344 type = "passive"; 344 type = "passive";
345 }; 345 };
346 cpu-crit: cpu-crit { 346 cpu_crit: cpu-crit {
347 temperature = <125000>; /* millicelsius */ 347 temperature = <125000>; /* millicelsius */
348 hysteresis = <2000>; /* millicelsius */ 348 hysteresis = <2000>; /* millicelsius */
349 type = "critical"; 349 type = "critical";
@@ -356,7 +356,7 @@ thermal-zones {
356 }; 356 };
357 }; 357 };
358 358
359 gpu-thermal: gpu-thermal { 359 gpu_thermal: gpu-thermal {
360 polling-delay-passive = <120>; /* milliseconds */ 360 polling-delay-passive = <120>; /* milliseconds */
361 polling-delay = <1000>; /* milliseconds */ 361 polling-delay = <1000>; /* milliseconds */
362 362
@@ -365,12 +365,12 @@ thermal-zones {
365 365
366 trips { 366 trips {
367 /* each zone within the SoC may have its own trips */ 367 /* each zone within the SoC may have its own trips */
368 gpu-alert: gpu-alert { 368 gpu_alert: gpu-alert {
369 temperature = <90000>; /* millicelsius */ 369 temperature = <90000>; /* millicelsius */
370 hysteresis = <2000>; /* millicelsius */ 370 hysteresis = <2000>; /* millicelsius */
371 type = "passive"; 371 type = "passive";
372 }; 372 };
373 gpu-crit: gpu-crit { 373 gpu_crit: gpu-crit {
374 temperature = <105000>; /* millicelsius */ 374 temperature = <105000>; /* millicelsius */
375 hysteresis = <2000>; /* millicelsius */ 375 hysteresis = <2000>; /* millicelsius */
376 type = "critical"; 376 type = "critical";
@@ -383,7 +383,7 @@ thermal-zones {
383 }; 383 };
384 }; 384 };
385 385
386 dsp-thermal: dsp-thermal { 386 dsp_thermal: dsp-thermal {
387 polling-delay-passive = <50>; /* milliseconds */ 387 polling-delay-passive = <50>; /* milliseconds */
388 polling-delay = <1000>; /* milliseconds */ 388 polling-delay = <1000>; /* milliseconds */
389 389
@@ -392,12 +392,12 @@ thermal-zones {
392 392
393 trips { 393 trips {
394 /* each zone within the SoC may have its own trips */ 394 /* each zone within the SoC may have its own trips */
395 dsp-alert: gpu-alert { 395 dsp_alert: dsp-alert {
396 temperature = <90000>; /* millicelsius */ 396 temperature = <90000>; /* millicelsius */
397 hysteresis = <2000>; /* millicelsius */ 397 hysteresis = <2000>; /* millicelsius */
398 type = "passive"; 398 type = "passive";
399 }; 399 };
400 dsp-crit: gpu-crit { 400 dsp_crit: dsp-crit {
401 temperature = <135000>; /* millicelsius */ 401 temperature = <135000>; /* millicelsius */
402 hysteresis = <2000>; /* millicelsius */ 402 hysteresis = <2000>; /* millicelsius */
403 type = "critical"; 403 type = "critical";
@@ -457,7 +457,7 @@ ocp {
457}; 457};
458 458
459thermal-zones { 459thermal-zones {
460 cpu-thermal: cpu-thermal { 460 cpu_thermal: cpu-thermal {
461 polling-delay-passive = <250>; /* milliseconds */ 461 polling-delay-passive = <250>; /* milliseconds */
462 polling-delay = <1000>; /* milliseconds */ 462 polling-delay = <1000>; /* milliseconds */
463 463
@@ -508,7 +508,7 @@ with many sensors and many cooling devices.
508 /* 508 /*
509 * An IC with several temperature sensors. 509 * An IC with several temperature sensors.
510 */ 510 */
511 adc-dummy: sensor@0x50 { 511 adc_dummy: sensor@0x50 {
512 ... 512 ...
513 #thermal-sensor-cells = <1>; /* sensor internal ID */ 513 #thermal-sensor-cells = <1>; /* sensor internal ID */
514 }; 514 };
@@ -520,7 +520,7 @@ thermal-zones {
520 polling-delay = <2500>; /* milliseconds */ 520 polling-delay = <2500>; /* milliseconds */
521 521
522 /* sensor ID */ 522 /* sensor ID */
523 thermal-sensors = <&adc-dummy 4>; 523 thermal-sensors = <&adc_dummy 4>;
524 524
525 trips { 525 trips {
526 ... 526 ...
@@ -531,14 +531,14 @@ thermal-zones {
531 }; 531 };
532 }; 532 };
533 533
534 board-thermal: board-thermal { 534 board_thermal: board-thermal {
535 polling-delay-passive = <1000>; /* milliseconds */ 535 polling-delay-passive = <1000>; /* milliseconds */
536 polling-delay = <2500>; /* milliseconds */ 536 polling-delay = <2500>; /* milliseconds */
537 537
538 /* sensor ID */ 538 /* sensor ID */
539 thermal-sensors = <&adc-dummy 0>, /* pcb top edge */ 539 thermal-sensors = <&adc_dummy 0>, /* pcb top edge */
540 <&adc-dummy 1>, /* lcd */ 540 <&adc_dummy 1>, /* lcd */
541 <&adc-dymmy 2>; /* back cover */ 541 <&adc_dummy 2>; /* back cover */
542 /* 542 /*
543 * An array of coefficients describing the sensor 543 * An array of coefficients describing the sensor
544 * linear relation. E.g.: 544 * linear relation. E.g.:
@@ -548,22 +548,22 @@ thermal-zones {
548 548
549 trips { 549 trips {
550 /* Trips are based on resulting linear equation */ 550 /* Trips are based on resulting linear equation */
551 cpu-trip: cpu-trip { 551 cpu_trip: cpu-trip {
552 temperature = <60000>; /* millicelsius */ 552 temperature = <60000>; /* millicelsius */
553 hysteresis = <2000>; /* millicelsius */ 553 hysteresis = <2000>; /* millicelsius */
554 type = "passive"; 554 type = "passive";
555 }; 555 };
556 gpu-trip: gpu-trip { 556 gpu_trip: gpu-trip {
557 temperature = <55000>; /* millicelsius */ 557 temperature = <55000>; /* millicelsius */
558 hysteresis = <2000>; /* millicelsius */ 558 hysteresis = <2000>; /* millicelsius */
559 type = "passive"; 559 type = "passive";
560 }; 560 };
561 lcd-trip: lcp-trip { 561 lcd_trip: lcd-trip {
562 temperature = <53000>; /* millicelsius */ 562 temperature = <53000>; /* millicelsius */
563 hysteresis = <2000>; /* millicelsius */ 563 hysteresis = <2000>; /* millicelsius */
564 type = "passive"; 564 type = "passive";
565 }; 565 };
566 crit-trip: crit-trip { 566 crit_trip: crit-trip {
567 temperature = <68000>; /* millicelsius */ 567 temperature = <68000>; /* millicelsius */
568 hysteresis = <2000>; /* millicelsius */ 568 hysteresis = <2000>; /* millicelsius */
569 type = "critical"; 569 type = "critical";
@@ -572,17 +572,17 @@ thermal-zones {
572 572
573 cooling-maps { 573 cooling-maps {
574 map0 { 574 map0 {
575 trip = <&cpu-trip>; 575 trip = <&cpu_trip>;
576 cooling-device = <&cpu0 0 2>; 576 cooling-device = <&cpu0 0 2>;
577 contribution = <55>; 577 contribution = <55>;
578 }; 578 };
579 map1 { 579 map1 {
580 trip = <&gpu-trip>; 580 trip = <&gpu_trip>;
581 cooling-device = <&gpu0 0 2>; 581 cooling-device = <&gpu0 0 2>;
582 contribution = <20>; 582 contribution = <20>;
583 }; 583 };
584 map2 { 584 map2 {
585 trip = <&lcd-trip>; 585 trip = <&lcd_trip>;
586 cooling-device = <&lcd0 5 10>; 586 cooling-device = <&lcd0 5 10>;
587 contribution = <15>; 587 contribution = <15>;
588 }; 588 };
diff --git a/Documentation/devicetree/bindings/watchdog/gpio-wdt.txt b/Documentation/devicetree/bindings/watchdog/gpio-wdt.txt
index 37afec194949..198794963786 100644
--- a/Documentation/devicetree/bindings/watchdog/gpio-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/gpio-wdt.txt
@@ -13,6 +13,11 @@ Required Properties:
13 by the GPIO flags. 13 by the GPIO flags.
14- hw_margin_ms: Maximum time to reset watchdog circuit (milliseconds). 14- hw_margin_ms: Maximum time to reset watchdog circuit (milliseconds).
15 15
16Optional Properties:
17- always-running: If the watchdog timer cannot be disabled, add this flag to
18 have the driver keep toggling the signal without a client. It will only cease
19 to toggle the signal when the device is open and the timeout has elapsed.
20
16Example: 21Example:
17 watchdog: watchdog { 22 watchdog: watchdog {
18 /* ADM706 */ 23 /* ADM706 */
diff --git a/Documentation/devicetree/bindings/watchdog/imgpdc-wdt.txt b/Documentation/devicetree/bindings/watchdog/imgpdc-wdt.txt
new file mode 100644
index 000000000000..b2fa11fd43de
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/imgpdc-wdt.txt
@@ -0,0 +1,19 @@
1*ImgTec PowerDown Controller (PDC) Watchdog Timer (WDT)
2
3Required properties:
4- compatible : Should be "img,pdc-wdt"
5- reg : Should contain WDT registers location and length
6- clocks: Must contain an entry for each entry in clock-names.
7- clock-names: Should contain "wdt" and "sys"; the watchdog counter
8 clock and register interface clock respectively.
9- interrupts : Should contain WDT interrupt
10
11Examples:
12
13watchdog@18102100 {
14 compatible = "img,pdc-wdt";
15 reg = <0x18102100 0x100>;
16 clocks = <&pdc_wdt_clk>, <&sys_clk>;
17 clock-names = "wdt", "sys";
18 interrupts = <0 52 IRQ_TYPE_LEVEL_HIGH>;
19};
diff --git a/Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt b/Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt
new file mode 100644
index 000000000000..e27763ef0049
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt
@@ -0,0 +1,12 @@
1Ingenic Watchdog Timer (WDT) Controller for JZ4740
2
3Required properties:
4compatible: "ingenic,jz4740-watchdog"
5reg: Register address and length for watchdog registers
6
7Example:
8
9watchdog: jz4740-watchdog@10002000 {
10 compatible = "ingenic,jz4740-watchdog";
11 reg = <0x10002000 0x100>;
12};
diff --git a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
new file mode 100644
index 000000000000..af9eb5b8a253
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
@@ -0,0 +1,13 @@
1Mediatek SoCs Watchdog timer
2
3Required properties:
4
5- compatible : should be "mediatek,mt6589-wdt"
6- reg : Specifies base physical address and size of the registers.
7
8Example:
9
10wdt: watchdog@10000000 {
11 compatible = "mediatek,mt6589-wdt";
12 reg = <0x10000000 0x18>;
13};
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt
index 766658ccf235..05d2280190f1 100644
--- a/Documentation/dmaengine/provider.txt
+++ b/Documentation/dmaengine/provider.txt
@@ -113,6 +113,31 @@ need to initialize a few fields in there:
113 * channels: should be initialized as a list using the 113 * channels: should be initialized as a list using the
114 INIT_LIST_HEAD macro for example 114 INIT_LIST_HEAD macro for example
115 115
116 * src_addr_widths:
117 - should contain a bitmask of the supported source transfer widths
118
119 * dst_addr_widths:
120 - should contain a bitmask of the supported destination transfer
121 widths
122
123 * directions:
124 - should contain a bitmask of the supported slave directions
125 (i.e. excluding mem2mem transfers)
126
127 * residue_granularity:
128 - Granularity of the transfer residue reported to dma_set_residue.
129 - This can be either:
130 + Descriptor
131 -> Your device doesn't support any kind of residue
132 reporting. The framework will only know that a particular
133 transaction descriptor is done.
134 + Segment
135 -> Your device is able to report which chunks have been
136 transferred
137 + Burst
138 -> Your device is able to report which bursts have been
139 transferred
140
116 * dev: should hold the pointer to the struct device associated 141 * dev: should hold the pointer to the struct device associated
117 to your current driver instance. 142 to your current driver instance.
118 143
@@ -274,48 +299,36 @@ supported.
274 account the current period. 299 account the current period.
275 - This function can be called in an interrupt context. 300 - This function can be called in an interrupt context.
276 301
277 * device_control 302 * device_config
278 - Used by client drivers to control and configure the channel it 303 - Reconfigures the channel with the configuration given as
279 has a handle on. 304 argument
280 - Called with a command and an argument 305 - This command should NOT perform synchronously, or on any
281 + The command is one of the values listed by the enum 306 currently queued transfers, but only on subsequent ones
282 dma_ctrl_cmd. The valid commands are: 307 - In this case, the function will receive a dma_slave_config
283 + DMA_PAUSE 308 structure pointer as an argument, that will detail which
284 + Pauses a transfer on the channel 309 configuration to use.
285 + This command should operate synchronously on the channel, 310 - Even though that structure contains a direction field, this
286 pausing right away the work of the given channel 311 field is deprecated in favor of the direction argument given to
287 + DMA_RESUME 312 the prep_* functions
288 + Restarts a transfer on the channel 313 - This call is mandatory for slave operations only. This should NOT be
289 + This command should operate synchronously on the channel, 314 set or expected to be set for memcpy operations.
290 resuming right away the work of the given channel 315 If a driver supports both, it should use this call for slave
291 + DMA_TERMINATE_ALL 316 operations only and not for memcpy ones.
292 + Aborts all the pending and ongoing transfers on the 317
293 channel 318 * device_pause
294 + This command should operate synchronously on the channel, 319 - Pauses a transfer on the channel
295 terminating right away all the channels 320 - This command should operate synchronously on the channel,
296 + DMA_SLAVE_CONFIG 321 pausing right away the work of the given channel
297 + Reconfigures the channel with passed configuration 322
298 + This command should NOT perform synchronously, or on any 323 * device_resume
299 currently queued transfers, but only on subsequent ones 324 - Resumes a transfer on the channel
300 + In this case, the function will receive a 325 - This command should operate synchronously on the channel,
301 dma_slave_config structure pointer as an argument, that 326 resuming right away the work of the given channel
302 will detail which configuration to use. 327
303 + Even though that structure contains a direction field, 328 * device_terminate_all
304 this field is deprecated in favor of the direction 329 - Aborts all the pending and ongoing transfers on the channel
305 argument given to the prep_* functions 330 - This command should operate synchronously on the channel,
306 + FSLDMA_EXTERNAL_START 331 terminating right away all the transfers of the given channel
307 + TODO: Why does that even exist?
308 + The argument is an opaque unsigned long. This actually is a
309 pointer to a struct dma_slave_config that should be used only
310 in the DMA_SLAVE_CONFIG.
311
312 * device_slave_caps
313 - Called through the framework by client drivers in order to have
314 an idea of what are the properties of the channel allocated to
315 them.
316 - Such properties are the buswidth, available directions, etc.
317 - Required for every generic layer doing DMA transfers, such as
318 ASoC.
319 332
320Misc notes (stuff that should be documented, but don't really know 333Misc notes (stuff that should be documented, but don't really know
321where to put them) 334where to put them)
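
The conversion above replaces the multiplexed device_control() callback with
one function pointer per operation on struct dma_device. As a minimal sketch
only (everything prefixed "foo_" is invented; the dmaengine fields, constants
and callback signatures are the real ones from <linux/dmaengine.h>), a
provider written against the new interface might look like:

    #include <linux/bitops.h>
    #include <linux/dmaengine.h>

    static int foo_dma_config(struct dma_chan *chan,
                              struct dma_slave_config *cfg)
    {
            /* Store cfg; it must only affect subsequent transfers. */
            return 0;
    }

    static int foo_dma_pause(struct dma_chan *chan)
    {
            /* Must take effect on the channel synchronously. */
            return 0;
    }

    static int foo_dma_resume(struct dma_chan *chan)
    {
            /* Must take effect on the channel synchronously. */
            return 0;
    }

    static int foo_dma_terminate_all(struct dma_chan *chan)
    {
            /* Abort pending and ongoing transfers right away. */
            return 0;
    }

    static void foo_dma_init(struct dma_device *dd)
    {
            dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
            dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
            dd->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
            dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

            dd->device_config = foo_dma_config;
            dd->device_pause = foo_dma_pause;
            dd->device_resume = foo_dma_resume;
            dd->device_terminate_all = foo_dma_terminate_all;
    }

Clients keep reaching these paths through the existing dmaengine_slave_config(),
dmaengine_pause(), dmaengine_resume() and dmaengine_terminate_all() wrappers,
so no client-side change is implied by this sketch.
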
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 2ca3d17eee56..f91926f2f482 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -164,8 +164,6 @@ the block device inode. See there for more details.
164 164
165--------------------------- file_system_type --------------------------- 165--------------------------- file_system_type ---------------------------
166prototypes: 166prototypes:
167 int (*get_sb) (struct file_system_type *, int,
168 const char *, void *, struct vfsmount *);
169 struct dentry *(*mount) (struct file_system_type *, int, 167 struct dentry *(*mount) (struct file_system_type *, int,
170 const char *, void *); 168 const char *, void *);
171 void (*kill_sb) (struct super_block *); 169 void (*kill_sb) (struct super_block *);
diff --git a/Documentation/filesystems/dlmfs.txt b/Documentation/filesystems/dlmfs.txt
index 1b528b2ad809..fcf4d509d118 100644
--- a/Documentation/filesystems/dlmfs.txt
+++ b/Documentation/filesystems/dlmfs.txt
@@ -5,8 +5,8 @@ system.
5 5
6dlmfs is built with OCFS2 as it requires most of its infrastructure. 6dlmfs is built with OCFS2 as it requires most of its infrastructure.
7 7
8Project web page: http://oss.oracle.com/projects/ocfs2 8Project web page: http://ocfs2.wiki.kernel.org
9Tools web page: http://oss.oracle.com/projects/ocfs2-tools 9Tools web page: https://github.com/markfasheh/ocfs2-tools
10OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/ 10OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/
11 11
12All code copyright 2005 Oracle except when otherwise noted. 12All code copyright 2005 Oracle except when otherwise noted.
diff --git a/Documentation/filesystems/ocfs2.txt b/Documentation/filesystems/ocfs2.txt
index 28f8c08201e2..4c49e5410595 100644
--- a/Documentation/filesystems/ocfs2.txt
+++ b/Documentation/filesystems/ocfs2.txt
@@ -8,8 +8,8 @@ also make it attractive for non-clustered use.
8You'll want to install the ocfs2-tools package in order to at least 8You'll want to install the ocfs2-tools package in order to at least
9get "mount.ocfs2" and "ocfs2_hb_ctl". 9get "mount.ocfs2" and "ocfs2_hb_ctl".
10 10
11Project web page: http://oss.oracle.com/projects/ocfs2 11Project web page: http://ocfs2.wiki.kernel.org
12Tools web page: http://oss.oracle.com/projects/ocfs2-tools 12Tools git tree: https://github.com/markfasheh/ocfs2-tools
13OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/ 13OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/
14 14
15All code copyright 2005 Oracle except when otherwise noted. 15All code copyright 2005 Oracle except when otherwise noted.
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index a27c950ece61..6db0e5d1da07 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -159,6 +159,22 @@ overlay filesystem (though an operation on the name of the file such as
159rename or unlink will of course be noticed and handled). 159rename or unlink will of course be noticed and handled).
160 160
161 161
162Multiple lower layers
163---------------------
164
165Multiple lower layers can now be given using the colon (":") as a
166separator character between the directory names. For example:
167
168 mount -t overlay overlay -olowerdir=/lower1:/lower2:/lower3 /merged
169
170As the example shows, "upperdir=" and "workdir=" may be omitted. In
171that case the overlay will be read-only.
172
173The specified lower directories will be stacked beginning from the
174rightmost one and going left. In the above example lower1 will be the
175top, lower2 the middle and lower3 the bottom layer.
176
177
162Non-standard behavior 178Non-standard behavior
163--------------------- 179---------------------
164 180
@@ -196,3 +212,15 @@ Changes to the underlying filesystems while part of a mounted overlay
196filesystem are not allowed. If the underlying filesystem is changed, 212filesystem are not allowed. If the underlying filesystem is changed,
197the behavior of the overlay is undefined, though it will not result in 213the behavior of the overlay is undefined, though it will not result in
198a crash or deadlock. 214a crash or deadlock.
215
216Testsuite
217---------
218
219There's a testsuite developed by David Howells at:
220
221 git://git.infradead.org/users/dhowells/unionmount-testsuite.git
222
223Run as root:
224
225 # cd unionmount-testsuite
226 # ./run --ov
diff --git a/Documentation/i2c/functionality b/Documentation/i2c/functionality
index 4556a3eb87c4..4aae8ed15873 100644
--- a/Documentation/i2c/functionality
+++ b/Documentation/i2c/functionality
@@ -12,7 +12,7 @@ FUNCTIONALITY CONSTANTS
12----------------------- 12-----------------------
13 13
14For the most up-to-date list of functionality constants, please check 14For the most up-to-date list of functionality constants, please check
15<linux/i2c.h>! 15<uapi/linux/i2c.h>!
16 16
17 I2C_FUNC_I2C Plain i2c-level commands (Pure SMBus 17 I2C_FUNC_I2C Plain i2c-level commands (Pure SMBus
18 adapters typically can not do these) 18 adapters typically can not do these)
diff --git a/Documentation/ia64/paravirt_ops.txt b/Documentation/ia64/paravirt_ops.txt
deleted file mode 100644
index 39ded02ec33f..000000000000
--- a/Documentation/ia64/paravirt_ops.txt
+++ /dev/null
@@ -1,137 +0,0 @@
1Paravirt_ops on IA64
2====================
3 21 May 2008, Isaku Yamahata <yamahata@valinux.co.jp>
4
5
6Introduction
7------------
8The aim of this documentation is to help with maintainability and/or to
9encourage people to use paravirt_ops/IA64.
10
11paravirt_ops (pv_ops in short) is a way for virtualization support of
12Linux kernel on x86. Several ways for virtualization support were
13proposed, paravirt_ops is the winner.
14On the other hand, now there are also several IA64 virtualization
15technologies like kvm/IA64, xen/IA64 and many other academic IA64
16hypervisors so that it is good to add generic virtualization
17infrastructure on Linux/IA64.
18
19
20What is paravirt_ops?
21---------------------
22It has been developed on x86 as virtualization support via API, not ABI.
23It allows each hypervisor to override operations which are important for
24hypervisors at API level. And it allows a single kernel binary to run on
25all supported execution environments including native machine.
26Essentially paravirt_ops is a set of function pointers which represent
27operations corresponding to low level sensitive instructions and high
28level functionalities in various area. But one significant difference
29from usual function pointer table is that it allows optimization with
30binary patch. It is because some of these operations are very
31performance sensitive and indirect call overhead is not negligible.
32With binary patch, indirect C function call can be transformed into
33direct C function call or in-place execution to eliminate the overhead.
34
35Thus, operations of paravirt_ops are classified into three categories.
36- simple indirect call
37 These operations correspond to high level functionality so that the
38 overhead of indirect call isn't very important.
39
40- indirect call which allows optimization with binary patch
41 Usually these operations correspond to low level instructions. They
42 are called frequently and performance critical. So the overhead is
43 very important.
44
45- a set of macros for hand written assembly code
46 Hand written assembly codes (.S files) also need paravirtualization
47 because they include sensitive instructions or some of code paths in
48 them are very performance critical.
49
50
51The relation to the IA64 machine vector
52---------------------------------------
53Linux/IA64 has the IA64 machine vector functionality which allows the
54kernel to switch implementations (e.g. initialization, ipi, dma api...)
55depending on executing platform.
56We can replace some implementations very easily defining a new machine
57vector. Thus another approach for virtualization support would be
58enhancing the machine vector functionality.
59But paravirt_ops approach was taken because
60- virtualization support needs wider support than machine vector does.
61 e.g. low level instruction paravirtualization. It must be
62 initialized very early before platform detection.
63
64- virtualization support needs more functionality like binary patch.
65 Probably the calling overhead might not be very large compared to the
66 emulation overhead of virtualization. However in the native case, the
67 overhead should be eliminated completely.
68 A single kernel binary should run on each environment including native,
69 and the overhead of paravirt_ops on native environment should be as
70 small as possible.
71
72- for full virtualization technology, e.g. KVM/IA64 or
73 Xen/IA64 HVM domain, the result would be
74 (the emulated platform machine vector. probably dig) + (pv_ops).
75 This means that the virtualization support layer should be under
76 the machine vector layer.
77
78Possibly it might be better to move some function pointers from
79paravirt_ops to machine vector. In fact, Xen domU case utilizes both
80pv_ops and machine vector.
81
82
83IA64 paravirt_ops
84-----------------
85In this section, the concrete paravirt_ops will be discussed.
86Because of the architecture difference between ia64 and x86, the
87resulting set of functions is very different from x86 pv_ops.
88
89- C function pointer tables
90They are not very performance critical so that simple C indirect
91function call is acceptable. The following structures are defined at
92this moment. For details see linux/include/asm-ia64/paravirt.h
93 - struct pv_info
94 This structure describes the execution environment.
95 - struct pv_init_ops
96 This structure describes the various initialization hooks.
97 - struct pv_iosapic_ops
98 This structure describes hooks to iosapic operations.
99 - struct pv_irq_ops
100 This structure describes hooks to irq related operations
101 - struct pv_time_op
102 This structure describes hooks to steal time accounting.
103
104- a set of indirect calls which need optimization
105Currently this class of functions correspond to a subset of IA64
106intrinsics. At this moment the optimization with binary patch isn't
107implemented yet.
108struct pv_cpu_op is defined. For details see
109linux/include/asm-ia64/paravirt_privop.h
110Mostly they correspond to ia64 intrinsics 1-to-1.
111Caveat: Now they are defined as C indirect function pointers, but in
112order to support binary patch optimization, they will be changed
113using GCC extended inline assembly code.
114
115- a set of macros for hand written assembly code (.S files)
116For maintenance purpose, the taken approach for .S files is single
117source code and compile multiple times with different macros definitions.
118Each pv_ops instance must define those macros to compile.
119The important thing here is that sensitive, but non-privileged
120instructions must be paravirtualized and that some privileged
121instructions also need paravirtualization for reasonable performance.
122Developers who modify .S files must be aware of that. At this moment
123an easy checker is implemented to detect paravirtualization breakage.
124But it doesn't cover all the cases.
125
126Sometimes this set of macros is called pv_cpu_asm_op. But there is no
127corresponding structure in the source code.
128Those macros mostly 1:1 correspond to a subset of privileged
129instructions. See linux/include/asm-ia64/native/inst.h.
130And some functions written in assembly also need to be overrided so
131that each pv_ops instance have to define some macros. Again see
132linux/include/asm-ia64/native/inst.h.
133
134
135Those structures must be initialized very early before start_kernel.
136Probably initialized in head.S using multi entry point or some other trick.
137For native case implementation see linux/arch/ia64/kernel/paravirt.c.
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index 90bca6f988e1..a63e5e013a8c 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -3,8 +3,8 @@ ALPS Touchpad Protocol
3 3
4Introduction 4Introduction
5------------ 5------------
6Currently the ALPS touchpad driver supports five protocol versions in use by 6Currently the ALPS touchpad driver supports seven protocol versions in use by
7ALPS touchpads, called versions 1, 2, 3, 4 and 5. 7ALPS touchpads, called versions 1, 2, 3, 4, 5, 6 and 7.
8 8
9Since roughly mid-2010 several new ALPS touchpads have been released and 9Since roughly mid-2010 several new ALPS touchpads have been released and
10integrated into a variety of laptops and netbooks. These new touchpads 10integrated into a variety of laptops and netbooks. These new touchpads
@@ -240,3 +240,67 @@ For mt, the format is:
240 byte 3: 0 x23 x22 x21 x20 x19 x18 x17 240 byte 3: 0 x23 x22 x21 x20 x19 x18 x17
241 byte 4: 0 x9 x8 x7 x6 x5 x4 x3 241 byte 4: 0 x9 x8 x7 x6 x5 x4 x3
242 byte 5: 0 x16 x15 x14 x13 x12 x11 x10 242 byte 5: 0 x16 x15 x14 x13 x12 x11 x10
243
244ALPS Absolute Mode - Protocol Version 6
245---------------------------------------
246
247For trackstick packet, the format is:
248
249 byte 0: 1 1 1 1 1 1 1 1
250 byte 1: 0 X6 X5 X4 X3 X2 X1 X0
251 byte 2: 0 Y6 Y5 Y4 Y3 Y2 Y1 Y0
252 byte 3: ? Y7 X7 ? ? M R L
253 byte 4: Z7 Z6 Z5 Z4 Z3 Z2 Z1 Z0
254 byte 5: 0 1 1 1 1 1 1 1
255
256For touchpad packet, the format is:
257
258 byte 0: 1 1 1 1 1 1 1 1
259 byte 1: 0 0 0 0 x3 x2 x1 x0
260 byte 2: 0 0 0 0 y3 y2 y1 y0
261 byte 3: ? x7 x6 x5 x4 ? r l
262 byte 4: ? y7 y6 y5 y4 ? ? ?
263 byte 5: z7 z6 z5 z4 z3 z2 z1 z0
264
265(the v6 touchpad does not have a middle button)
266
267ALPS Absolute Mode - Protocol Version 7
268---------------------------------------
269
270For trackstick packet, the format is:
271
272 byte 0: 0 1 0 0 1 0 0 0
273 byte 1: 1 1 * * 1 M R L
274 byte 2: X7 1 X5 X4 X3 X2 X1 X0
275 byte 3: Z6 1 Y6 X6 1 Y2 Y1 Y0
276 byte 4: Y7 0 Y5 Y4 Y3 1 1 0
277 byte 5: T&P 0 Z5 Z4 Z3 Z2 Z1 Z0
278
279For touchpad packet, the format is:
280
281 packet-fmt b7 b6 b5 b4 b3 b2 b1 b0
282 byte 0: TWO & MULTI L 1 R M 1 Y0-2 Y0-1 Y0-0
283 byte 0: NEW L 1 X1-5 1 1 Y0-2 Y0-1 Y0-0
284 byte 1: Y0-10 Y0-9 Y0-8 Y0-7 Y0-6 Y0-5 Y0-4 Y0-3
285 byte 2: X0-11 1 X0-10 X0-9 X0-8 X0-7 X0-6 X0-5
286 byte 3: X1-11 1 X0-4 X0-3 1 X0-2 X0-1 X0-0
287 byte 4: TWO X1-10 TWO X1-9 X1-8 X1-7 X1-6 X1-5 X1-4
288 byte 4: MULTI X1-10 TWO X1-9 X1-8 X1-7 X1-6 Y1-5 1
289 byte 4: NEW X1-10 TWO X1-9 X1-8 X1-7 X1-6 0 0
290 byte 5: TWO & NEW Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 Y1-5 Y1-4
291 byte 5: MULTI Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 F-1 F-0
292
293 L: Left button
294 R / M: Non-clickpads: Right / Middle button
295 Clickpads: When > 2 fingers are down, and some fingers
296 are in the button area, then the 2 coordinates reported
297 are for fingers outside the button area and these report
298 extra fingers being present in the right / left button
299 area. Note these fingers are not added to the F field,
300 so if a TWO packet is received and R = 1 then there are
301 3 fingers down, etc.
302 TWO: 1: Two touches present, byte 0/4/5 are in TWO fmt
303 0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt
304 otherwise byte 0 bit 4 must be set and byte 0/4/5 are
305 in NEW fmt
306 F: Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
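
The bit layouts above translate mechanically into mask-and-shift code. As an
illustration only (the struct and function names are invented, and the real
driver may handle signedness differently), the v6 trackstick packet could be
unpacked like this:

    #include <stdint.h>

    struct v6_trackstick {
            int x, y, z;
            int left, right, middle;
    };

    static void alps_v6_decode_trackstick(const uint8_t p[6],
                                          struct v6_trackstick *t)
    {
            /* byte 1 carries X6..X0; byte 3 bit 5 carries X7 */
            t->x = (p[1] & 0x7f) | ((p[3] & 0x20) << 2);
            /* byte 2 carries Y6..Y0; byte 3 bit 6 carries Y7 */
            t->y = (p[2] & 0x7f) | ((p[3] & 0x40) << 1);
            /* byte 4 is Z7..Z0 */
            t->z = p[4];
            /* byte 3 low bits are M, R, L */
            t->middle = (p[3] >> 2) & 1;
            t->right = (p[3] >> 1) & 1;
            t->left = p[3] & 1;
    }
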
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index a311db829e9b..74b6c6d97210 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -524,15 +524,16 @@ more details, with real examples.
524 Example: 524 Example:
525 #arch/x86/Makefile 525 #arch/x86/Makefile
526 cflags-y += $(shell \ 526 cflags-y += $(shell \
527 if [ $(call cc-version) -ge 0300 ] ; then \ 527 if [ $(cc-version) -ge 0300 ] ; then \
528 echo "-mregparm=3"; fi ;) 528 echo "-mregparm=3"; fi ;)
529 529
530 In the above example, -mregparm=3 is only used for gcc version greater 530 In the above example, -mregparm=3 is only used for gcc version greater
531 than or equal to gcc 3.0. 531 than or equal to gcc 3.0.
532 532
533 cc-ifversion 533 cc-ifversion
534 cc-ifversion tests the version of $(CC) and equals last argument if 534 cc-ifversion tests the version of $(CC) and equals the fourth parameter
535 version expression is true. 535 if the version expression is true, or the fifth (if given) if the version
536 expression is false.
536 537
537 Example: 538 Example:
538 #fs/reiserfs/Makefile 539 #fs/reiserfs/Makefile
@@ -552,7 +553,7 @@ more details, with real examples.
552 553
553 Example: 554 Example:
554 #arch/powerpc/Makefile 555 #arch/powerpc/Makefile
555 $(Q)if test "$(call cc-fullversion)" = "040200" ; then \ 556 $(Q)if test "$(cc-fullversion)" = "040200" ; then \
556 echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \ 557 echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \
557 false ; \ 558 false ; \
558 fi 559 fi
@@ -751,12 +752,12 @@ generated by kbuild are deleted all over the kernel src tree when
751Additional files can be specified in kbuild makefiles by use of $(clean-files). 752Additional files can be specified in kbuild makefiles by use of $(clean-files).
752 753
753 Example: 754 Example:
754 #drivers/pci/Makefile 755 #lib/Makefile
755 clean-files := devlist.h classlist.h 756 clean-files := crc32table.h
756 757
757When executing "make clean", the two files "devlist.h classlist.h" will be 758When executing "make clean", the two files "devlist.h classlist.h" will be
758deleted. Kbuild will assume files to be in the same relative directory as the 759deleted. Kbuild will assume files to be in the same relative directory as the
759Makefile except if an absolute path is specified (path starting with '/'). 760Makefile, except if prefixed with $(objtree).
760 761
761To delete a directory hierarchy use: 762To delete a directory hierarchy use:
762 763
@@ -764,9 +765,8 @@ To delete a directory hierarchy use:
764 #scripts/package/Makefile 765 #scripts/package/Makefile
765 clean-dirs := $(objtree)/debian/ 766 clean-dirs := $(objtree)/debian/
766 767
767This will delete the directory debian, including all subdirectories. 768This will delete the directory debian in the toplevel directory, including all
768Kbuild will assume the directories to be in the same relative path as the 769subdirectories.
769Makefile if no absolute path is specified (path does not start with '/').
770 770
771To exclude certain files from make clean, use the $(no-clean-files) variable. 771To exclude certain files from make clean, use the $(no-clean-files) variable.
772This is only a special case used in the top level Kbuild file: 772This is only a special case used in the top level Kbuild file:
diff --git a/Documentation/virtual/00-INDEX b/Documentation/virtual/00-INDEX
index e952d30bbf0f..af0d23968ee7 100644
--- a/Documentation/virtual/00-INDEX
+++ b/Documentation/virtual/00-INDEX
@@ -2,6 +2,9 @@ Virtualization support in the Linux kernel.
2 2
300-INDEX 300-INDEX
4 - this file. 4 - this file.
5
6paravirt_ops.txt
7 - Describes the Linux kernel pv_ops to support different hypervisors
5kvm/ 8kvm/
6 - Kernel Virtual Machine. See also http://linux-kvm.org 9 - Kernel Virtual Machine. See also http://linux-kvm.org
7uml/ 10uml/
diff --git a/Documentation/virtual/paravirt_ops.txt b/Documentation/virtual/paravirt_ops.txt
new file mode 100644
index 000000000000..d4881c00e339
--- /dev/null
+++ b/Documentation/virtual/paravirt_ops.txt
@@ -0,0 +1,32 @@
1Paravirt_ops
2============
3
4Linux provides support for different hypervisor virtualization technologies.
5Historically different binary kernels would be required in order to support
6different hypervisors; this restriction was removed with pv_ops.
7Linux pv_ops is a virtualization API which enables support for different
8hypervisors. It allows each hypervisor to override critical operations and
9allows a single kernel binary to run on all supported execution environments
10including the native machine -- without any hypervisor.
11
12pv_ops provides a set of function pointers which represent operations
13corresponding to low level critical instructions and high level
14functionalities in various areas. pv_ops allows for run-time optimization
15by enabling binary patching of the low-level critical operations
16at boot time.
17
18pv_ops operations are classified into three categories:
19
20- simple indirect call
21 These operations correspond to high level functionality where it is
22 known that the overhead of an indirect call isn't very important.
23
24- indirect call which allows optimization with binary patch
25 Usually these operations correspond to low level critical instructions. They
26 are called frequently and are performance critical, so the call
27 overhead is very important.
28
29- a set of macros for hand written assembly code
30 Hand-written assembly code (.S files) also needs paravirtualization
31 because it includes sensitive instructions, or because some of its
32 code paths are very performance critical.
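
The "set of function pointers" described above is an ops table filled with
native defaults that a hypervisor overrides early at boot. A minimal sketch
of the pattern follows; every name in it is invented for illustration, and
the real tables live in arch/*/include/asm/paravirt.h:

    struct pv_demo_ops {
            void (*cpu_halt)(void);
    };

    /* Bare-metal default, e.g. the HLT instruction on x86. */
    static void native_demo_halt(void)
    {
    }

    /* Guest variant, e.g. a "yield to hypervisor" hypercall. */
    static void hv_demo_halt(void)
    {
    }

    static struct pv_demo_ops pv_demo = {
            .cpu_halt = native_demo_halt,
    };

    /* Early boot: a detected hypervisor overrides selected entries. */
    static void hv_demo_init(void)
    {
            pv_demo.cpu_halt = hv_demo_halt;
    }

    /*
     * Callers always go through the table. Binary patching can later
     * turn this indirect call into a direct call on hot paths, which
     * is why the native case pays almost no overhead.
     */
    static void demo_idle(void)
    {
            pv_demo.cpu_halt();
    }
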
diff --git a/Documentation/x86/zero-page.txt b/Documentation/x86/zero-page.txt
index 199f453cb4de..82fbdbc1e0b0 100644
--- a/Documentation/x86/zero-page.txt
+++ b/Documentation/x86/zero-page.txt
@@ -3,7 +3,7 @@ protocol of kernel. These should be filled by bootloader or 16-bit
3real-mode setup code of the kernel. References/settings to it mainly 3real-mode setup code of the kernel. References/settings to it mainly
4are in: 4are in:
5 5
6 arch/x86/include/asm/bootparam.h 6 arch/x86/include/uapi/asm/bootparam.h
7 7
8 8
9Offset Proto Name Meaning 9Offset Proto Name Meaning
diff --git a/Kbuild b/Kbuild
index b8b708ad6dc3..ab8ded92e870 100644
--- a/Kbuild
+++ b/Kbuild
@@ -5,24 +5,23 @@
5# 2) Generate asm-offsets.h (may need bounds.h) 5# 2) Generate asm-offsets.h (may need bounds.h)
6# 3) Check for missing system calls 6# 3) Check for missing system calls
7 7
8##### 8# Default sed regexp - multiline due to syntax constraints
9# 1) Generate bounds.h 9define sed-y
10 10 "/^->/{s:->#\(.*\):/* \1 */:; \
11bounds-file := include/generated/bounds.h 11 s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
12 12 s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
13always := $(bounds-file) 13 s:->::; p;}"
14targets := $(bounds-file) kernel/bounds.s 14endef
15 15
16quiet_cmd_bounds = GEN $@ 16quiet_cmd_offsets = GEN $@
17define cmd_bounds 17define cmd_offsets
18 (set -e; \ 18 (set -e; \
19 echo "#ifndef __LINUX_BOUNDS_H__"; \ 19 echo "#ifndef $2"; \
20 echo "#define __LINUX_BOUNDS_H__"; \ 20 echo "#define $2"; \
21 echo "/*"; \ 21 echo "/*"; \
22 echo " * DO NOT MODIFY."; \ 22 echo " * DO NOT MODIFY."; \
23 echo " *"; \ 23 echo " *"; \
24 echo " * This file was generated by Kbuild"; \ 24 echo " * This file was generated by Kbuild"; \
25 echo " *"; \
26 echo " */"; \ 25 echo " */"; \
27 echo ""; \ 26 echo ""; \
28 sed -ne $(sed-y) $<; \ 27 sed -ne $(sed-y) $<; \
@@ -30,6 +29,14 @@ define cmd_bounds
30 echo "#endif" ) > $@ 29 echo "#endif" ) > $@
31endef 30endef
32 31
32#####
33# 1) Generate bounds.h
34
35bounds-file := include/generated/bounds.h
36
37always := $(bounds-file)
38targets := $(bounds-file) kernel/bounds.s
39
33# We use internal kbuild rules to avoid the "is up to date" message from make 40# We use internal kbuild rules to avoid the "is up to date" message from make
34kernel/bounds.s: kernel/bounds.c FORCE 41kernel/bounds.s: kernel/bounds.c FORCE
35 $(Q)mkdir -p $(dir $@) 42 $(Q)mkdir -p $(dir $@)
@@ -37,7 +44,7 @@ kernel/bounds.s: kernel/bounds.c FORCE
37 44
38$(obj)/$(bounds-file): kernel/bounds.s Kbuild 45$(obj)/$(bounds-file): kernel/bounds.s Kbuild
39 $(Q)mkdir -p $(dir $@) 46 $(Q)mkdir -p $(dir $@)
40 $(call cmd,bounds) 47 $(call cmd,offsets,__LINUX_BOUNDS_H__)
41 48
42##### 49#####
43# 2) Generate asm-offsets.h 50# 2) Generate asm-offsets.h
@@ -49,32 +56,6 @@ always += $(offsets-file)
49targets += $(offsets-file) 56targets += $(offsets-file)
50targets += arch/$(SRCARCH)/kernel/asm-offsets.s 57targets += arch/$(SRCARCH)/kernel/asm-offsets.s
51 58
52
53# Default sed regexp - multiline due to syntax constraints
54define sed-y
55 "/^->/{s:->#\(.*\):/* \1 */:; \
56 s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
57 s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
58 s:->::; p;}"
59endef
60
61quiet_cmd_offsets = GEN $@
62define cmd_offsets
63 (set -e; \
64 echo "#ifndef __ASM_OFFSETS_H__"; \
65 echo "#define __ASM_OFFSETS_H__"; \
66 echo "/*"; \
67 echo " * DO NOT MODIFY."; \
68 echo " *"; \
69 echo " * This file was generated by Kbuild"; \
70 echo " *"; \
71 echo " */"; \
72 echo ""; \
73 sed -ne $(sed-y) $<; \
74 echo ""; \
75 echo "#endif" ) > $@
76endef
77
78# We use internal kbuild rules to avoid the "is up to date" message from make 59# We use internal kbuild rules to avoid the "is up to date" message from make
79arch/$(SRCARCH)/kernel/asm-offsets.s: arch/$(SRCARCH)/kernel/asm-offsets.c \ 60arch/$(SRCARCH)/kernel/asm-offsets.s: arch/$(SRCARCH)/kernel/asm-offsets.c \
80 $(obj)/$(bounds-file) FORCE 61 $(obj)/$(bounds-file) FORCE
@@ -82,7 +63,7 @@ arch/$(SRCARCH)/kernel/asm-offsets.s: arch/$(SRCARCH)/kernel/asm-offsets.c \
82 $(call if_changed_dep,cc_s_c) 63 $(call if_changed_dep,cc_s_c)
83 64
84$(obj)/$(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s Kbuild 65$(obj)/$(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s Kbuild
85 $(call cmd,offsets) 66 $(call cmd,offsets,__ASM_OFFSETS_H__)
86 67
87##### 68#####
88# 3) Check for missing system calls 69# 3) Check for missing system calls
diff --git a/MAINTAINERS b/MAINTAINERS
index 274a0058f3f2..a6ae6eb0c545 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2065,7 +2065,7 @@ F: include/net/bluetooth/
2065BONDING DRIVER 2065BONDING DRIVER
2066M: Jay Vosburgh <j.vosburgh@gmail.com> 2066M: Jay Vosburgh <j.vosburgh@gmail.com>
2067M: Veaceslav Falico <vfalico@gmail.com> 2067M: Veaceslav Falico <vfalico@gmail.com>
2068M: Andy Gospodarek <andy@greyhouse.net> 2068M: Andy Gospodarek <gospo@cumulusnetworks.com>
2069L: netdev@vger.kernel.org 2069L: netdev@vger.kernel.org
2070W: http://sourceforge.net/projects/bonding/ 2070W: http://sourceforge.net/projects/bonding/
2071S: Supported 2071S: Supported
@@ -2433,7 +2433,8 @@ F: arch/powerpc/oprofile/*cell*
2433F: arch/powerpc/platforms/cell/ 2433F: arch/powerpc/platforms/cell/
2434 2434
2435CEPH DISTRIBUTED FILE SYSTEM CLIENT 2435CEPH DISTRIBUTED FILE SYSTEM CLIENT
2436M: Sage Weil <sage@inktank.com> 2436M: Yan, Zheng <zyan@redhat.com>
2437M: Sage Weil <sage@redhat.com>
2437L: ceph-devel@vger.kernel.org 2438L: ceph-devel@vger.kernel.org
2438W: http://ceph.com/ 2439W: http://ceph.com/
2439T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git 2440T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
@@ -3936,7 +3937,7 @@ S: Maintained
3936F: drivers/staging/fbtft/ 3937F: drivers/staging/fbtft/
3937 3938
3938FCOE SUBSYSTEM (libfc, libfcoe, fcoe) 3939FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
3939M: Robert Love <robert.w.love@intel.com> 3940M: Vasu Dev <vasu.dev@intel.com>
3940L: fcoe-devel@open-fcoe.org 3941L: fcoe-devel@open-fcoe.org
3941W: www.Open-FCoE.org 3942W: www.Open-FCoE.org
3942S: Supported 3943S: Supported
@@ -4092,6 +4093,12 @@ S: Maintained
4092F: include/linux/platform_data/video-imxfb.h 4093F: include/linux/platform_data/video-imxfb.h
4093F: drivers/video/fbdev/imxfb.c 4094F: drivers/video/fbdev/imxfb.c
4094 4095
4096FREESCALE QUAD SPI DRIVER
4097M: Han Xu <han.xu@freescale.com>
4098L: linux-mtd@lists.infradead.org
4099S: Maintained
4100F: drivers/mtd/spi-nor/fsl-quadspi.c
4101
4095FREESCALE SOC FS_ENET DRIVER 4102FREESCALE SOC FS_ENET DRIVER
4096M: Pantelis Antoniou <pantelis.antoniou@gmail.com> 4103M: Pantelis Antoniou <pantelis.antoniou@gmail.com>
4097M: Vitaly Bordug <vbordug@ru.mvista.com> 4104M: Vitaly Bordug <vbordug@ru.mvista.com>
@@ -7206,8 +7213,7 @@ ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
7206M: Mark Fasheh <mfasheh@suse.com> 7213M: Mark Fasheh <mfasheh@suse.com>
7207M: Joel Becker <jlbec@evilplan.org> 7214M: Joel Becker <jlbec@evilplan.org>
7208L: ocfs2-devel@oss.oracle.com (moderated for non-subscribers) 7215L: ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
7209W: http://oss.oracle.com/projects/ocfs2/ 7216W: http://ocfs2.wiki.kernel.org
7210T: git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2.git
7211S: Supported 7217S: Supported
7212F: Documentation/filesystems/ocfs2.txt 7218F: Documentation/filesystems/ocfs2.txt
7213F: Documentation/filesystems/dlmfs.txt 7219F: Documentation/filesystems/dlmfs.txt
@@ -7296,7 +7302,7 @@ M: Alok Kataria <akataria@vmware.com>
7296M: Rusty Russell <rusty@rustcorp.com.au> 7302M: Rusty Russell <rusty@rustcorp.com.au>
7297L: virtualization@lists.linux-foundation.org 7303L: virtualization@lists.linux-foundation.org
7298S: Supported 7304S: Supported
7299F: Documentation/ia64/paravirt_ops.txt 7305F: Documentation/virtual/paravirt_ops.txt
7300F: arch/*/kernel/paravirt* 7306F: arch/*/kernel/paravirt*
7301F: arch/*/include/asm/paravirt.h 7307F: arch/*/include/asm/paravirt.h
7302 7308
@@ -7992,8 +7998,8 @@ S: Supported
7992F: drivers/net/wireless/ath/wcn36xx/ 7998F: drivers/net/wireless/ath/wcn36xx/
7993 7999
7994RADOS BLOCK DEVICE (RBD) 8000RADOS BLOCK DEVICE (RBD)
7995M: Yehuda Sadeh <yehuda@inktank.com> 8001M: Ilya Dryomov <idryomov@gmail.com>
7996M: Sage Weil <sage@inktank.com> 8002M: Sage Weil <sage@redhat.com>
7997M: Alex Elder <elder@kernel.org> 8003M: Alex Elder <elder@kernel.org>
7998M: ceph-devel@vger.kernel.org 8004M: ceph-devel@vger.kernel.org
7999W: http://ceph.com/ 8005W: http://ceph.com/
@@ -8496,6 +8502,7 @@ SYNOPSYS DESIGNWARE DMAC DRIVER
8496M: Viresh Kumar <viresh.linux@gmail.com> 8502M: Viresh Kumar <viresh.linux@gmail.com>
8497M: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 8503M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
8498S: Maintained 8504S: Maintained
8505F: include/linux/dma/dw.h
8499F: include/linux/platform_data/dma-dw.h 8506F: include/linux/platform_data/dma-dw.h
8500F: drivers/dma/dw/ 8507F: drivers/dma/dw/
8501 8508
@@ -8558,7 +8565,7 @@ S: Maintained
8558F: drivers/scsi/sr* 8565F: drivers/scsi/sr*
8559 8566
8560SCSI RDMA PROTOCOL (SRP) INITIATOR 8567SCSI RDMA PROTOCOL (SRP) INITIATOR
8561M: Bart Van Assche <bvanassche@acm.org> 8568M: Bart Van Assche <bart.vanassche@sandisk.com>
8562L: linux-rdma@vger.kernel.org 8569L: linux-rdma@vger.kernel.org
8563S: Supported 8570S: Supported
8564W: http://www.openfabrics.org 8571W: http://www.openfabrics.org
@@ -9710,6 +9717,11 @@ L: linux-omap@vger.kernel.org
9710S: Maintained 9717S: Maintained
9711F: drivers/thermal/ti-soc-thermal/ 9718F: drivers/thermal/ti-soc-thermal/
9712 9719
9720TI CDCE706 CLOCK DRIVER
9721M: Max Filippov <jcmvbkbc@gmail.com>
9722S: Maintained
9723F: drivers/clk/clk-cdce706.c
9724
9713TI CLOCK DRIVER 9725TI CLOCK DRIVER
9714M: Tero Kristo <t-kristo@ti.com> 9726M: Tero Kristo <t-kristo@ti.com>
9715L: linux-omap@vger.kernel.org 9727L: linux-omap@vger.kernel.org
diff --git a/Makefile b/Makefile
index dd8796caa239..e6a9b1b94656 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
1VERSION = 3 1VERSION = 4
2PATCHLEVEL = 19 2PATCHLEVEL = 0
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = 4EXTRAVERSION = -rc2
5NAME = Diseased Newt 5NAME = Hurr durr I'ma sheep
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
8# To see a list of typical targets execute "make help" 8# To see a list of typical targets execute "make help"
@@ -502,7 +502,7 @@ endif
502ifeq ($(KBUILD_EXTMOD),) 502ifeq ($(KBUILD_EXTMOD),)
503 ifneq ($(filter config %config,$(MAKECMDGOALS)),) 503 ifneq ($(filter config %config,$(MAKECMDGOALS)),)
504 config-targets := 1 504 config-targets := 1
505 ifneq ($(filter-out config %config,$(MAKECMDGOALS)),) 505 ifneq ($(words $(MAKECMDGOALS)),1)
506 mixed-targets := 1 506 mixed-targets := 1
507 endif 507 endif
508 endif 508 endif
@@ -1180,7 +1180,7 @@ CLEAN_DIRS += $(MODVERDIR)
1180# Directories & files removed with 'make mrproper' 1180# Directories & files removed with 'make mrproper'
1181MRPROPER_DIRS += include/config usr/include include/generated \ 1181MRPROPER_DIRS += include/config usr/include include/generated \
1182 arch/*/include/generated .tmp_objdiff 1182 arch/*/include/generated .tmp_objdiff
1183MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \ 1183MRPROPER_FILES += .config .config.old .version .old_version \
1184 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \ 1184 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
1185 signing_key.priv signing_key.x509 x509.genkey \ 1185 signing_key.priv signing_key.x509 x509.genkey \
1186 extra_certificates signing_key.x509.keyid \ 1186 extra_certificates signing_key.x509.keyid \
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index 766fdfde2b7a..9b0d40093c9a 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -27,7 +27,7 @@
27#define get_ds() (KERNEL_DS) 27#define get_ds() (KERNEL_DS)
28#define set_fs(x) (current_thread_info()->addr_limit = (x)) 28#define set_fs(x) (current_thread_info()->addr_limit = (x))
29 29
30#define segment_eq(a,b) ((a).seg == (b).seg) 30#define segment_eq(a, b) ((a).seg == (b).seg)
31 31
32/* 32/*
33 * Is an address valid? This does a straightforward calculation rather 33 * Is an address valid? This does a straightforward calculation rather
@@ -39,13 +39,13 @@
39 * - AND "addr+size" doesn't have any high-bits set 39 * - AND "addr+size" doesn't have any high-bits set
40 * - OR we are in kernel mode. 40 * - OR we are in kernel mode.
41 */ 41 */
42#define __access_ok(addr,size,segment) \ 42#define __access_ok(addr, size, segment) \
43 (((segment).seg & (addr | size | (addr+size))) == 0) 43 (((segment).seg & (addr | size | (addr+size))) == 0)
44 44
45#define access_ok(type,addr,size) \ 45#define access_ok(type, addr, size) \
46({ \ 46({ \
47 __chk_user_ptr(addr); \ 47 __chk_user_ptr(addr); \
48 __access_ok(((unsigned long)(addr)),(size),get_fs()); \ 48 __access_ok(((unsigned long)(addr)), (size), get_fs()); \
49}) 49})
50 50
51/* 51/*
@@ -60,20 +60,20 @@
60 * (a) re-use the arguments for side effects (sizeof/typeof is ok) 60 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
61 * (b) require any knowledge of processes at this stage 61 * (b) require any knowledge of processes at this stage
62 */ 62 */
63#define put_user(x,ptr) \ 63#define put_user(x, ptr) \
64 __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs()) 64 __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), get_fs())
65#define get_user(x,ptr) \ 65#define get_user(x, ptr) \
66 __get_user_check((x),(ptr),sizeof(*(ptr)),get_fs()) 66 __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
67 67
68/* 68/*
69 * The "__xxx" versions do not do address space checking, useful when 69 * The "__xxx" versions do not do address space checking, useful when
70 * doing multiple accesses to the same area (the programmer has to do the 70 * doing multiple accesses to the same area (the programmer has to do the
71 * checks by hand with "access_ok()") 71 * checks by hand with "access_ok()")
72 */ 72 */
73#define __put_user(x,ptr) \ 73#define __put_user(x, ptr) \
74 __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) 74 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
75#define __get_user(x,ptr) \ 75#define __get_user(x, ptr) \
76 __get_user_nocheck((x),(ptr),sizeof(*(ptr))) 76 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
77 77
78/* 78/*
79 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to 79 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
@@ -84,7 +84,7 @@
84 84
85extern void __get_user_unknown(void); 85extern void __get_user_unknown(void);
86 86
87#define __get_user_nocheck(x,ptr,size) \ 87#define __get_user_nocheck(x, ptr, size) \
88({ \ 88({ \
89 long __gu_err = 0; \ 89 long __gu_err = 0; \
90 unsigned long __gu_val; \ 90 unsigned long __gu_val; \
@@ -96,16 +96,16 @@ extern void __get_user_unknown(void);
96 case 8: __get_user_64(ptr); break; \ 96 case 8: __get_user_64(ptr); break; \
97 default: __get_user_unknown(); break; \ 97 default: __get_user_unknown(); break; \
98 } \ 98 } \
99 (x) = (__typeof__(*(ptr))) __gu_val; \ 99 (x) = (__force __typeof__(*(ptr))) __gu_val; \
100 __gu_err; \ 100 __gu_err; \
101}) 101})
102 102
103#define __get_user_check(x,ptr,size,segment) \ 103#define __get_user_check(x, ptr, size, segment) \
104({ \ 104({ \
105 long __gu_err = -EFAULT; \ 105 long __gu_err = -EFAULT; \
106 unsigned long __gu_val = 0; \ 106 unsigned long __gu_val = 0; \
107 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ 107 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
108 if (__access_ok((unsigned long)__gu_addr,size,segment)) { \ 108 if (__access_ok((unsigned long)__gu_addr, size, segment)) { \
109 __gu_err = 0; \ 109 __gu_err = 0; \
110 switch (size) { \ 110 switch (size) { \
111 case 1: __get_user_8(__gu_addr); break; \ 111 case 1: __get_user_8(__gu_addr); break; \
@@ -115,7 +115,7 @@ extern void __get_user_unknown(void);
115 default: __get_user_unknown(); break; \ 115 default: __get_user_unknown(); break; \
116 } \ 116 } \
117 } \ 117 } \
118 (x) = (__typeof__(*(ptr))) __gu_val; \ 118 (x) = (__force __typeof__(*(ptr))) __gu_val; \
119 __gu_err; \ 119 __gu_err; \
120}) 120})
121 121
@@ -201,31 +201,31 @@ struct __large_struct { unsigned long buf[100]; };
201 201
202extern void __put_user_unknown(void); 202extern void __put_user_unknown(void);
203 203
204#define __put_user_nocheck(x,ptr,size) \ 204#define __put_user_nocheck(x, ptr, size) \
205({ \ 205({ \
206 long __pu_err = 0; \ 206 long __pu_err = 0; \
207 __chk_user_ptr(ptr); \ 207 __chk_user_ptr(ptr); \
208 switch (size) { \ 208 switch (size) { \
209 case 1: __put_user_8(x,ptr); break; \ 209 case 1: __put_user_8(x, ptr); break; \
210 case 2: __put_user_16(x,ptr); break; \ 210 case 2: __put_user_16(x, ptr); break; \
211 case 4: __put_user_32(x,ptr); break; \ 211 case 4: __put_user_32(x, ptr); break; \
212 case 8: __put_user_64(x,ptr); break; \ 212 case 8: __put_user_64(x, ptr); break; \
213 default: __put_user_unknown(); break; \ 213 default: __put_user_unknown(); break; \
214 } \ 214 } \
215 __pu_err; \ 215 __pu_err; \
216}) 216})
217 217
218#define __put_user_check(x,ptr,size,segment) \ 218#define __put_user_check(x, ptr, size, segment) \
219({ \ 219({ \
220 long __pu_err = -EFAULT; \ 220 long __pu_err = -EFAULT; \
221 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ 221 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
222 if (__access_ok((unsigned long)__pu_addr,size,segment)) { \ 222 if (__access_ok((unsigned long)__pu_addr, size, segment)) { \
223 __pu_err = 0; \ 223 __pu_err = 0; \
224 switch (size) { \ 224 switch (size) { \
225 case 1: __put_user_8(x,__pu_addr); break; \ 225 case 1: __put_user_8(x, __pu_addr); break; \
226 case 2: __put_user_16(x,__pu_addr); break; \ 226 case 2: __put_user_16(x, __pu_addr); break; \
227 case 4: __put_user_32(x,__pu_addr); break; \ 227 case 4: __put_user_32(x, __pu_addr); break; \
228 case 8: __put_user_64(x,__pu_addr); break; \ 228 case 8: __put_user_64(x, __pu_addr); break; \
229 default: __put_user_unknown(); break; \ 229 default: __put_user_unknown(); break; \
230 } \ 230 } \
231 } \ 231 } \
@@ -237,7 +237,7 @@ extern void __put_user_unknown(void);
237 * instead of writing: this is because they do not write to 237 * instead of writing: this is because they do not write to
238 * any memory gcc knows about, so there are no aliasing issues 238 * any memory gcc knows about, so there are no aliasing issues
239 */ 239 */
240#define __put_user_64(x,addr) \ 240#define __put_user_64(x, addr) \
241__asm__ __volatile__("1: stq %r2,%1\n" \ 241__asm__ __volatile__("1: stq %r2,%1\n" \
242 "2:\n" \ 242 "2:\n" \
243 ".section __ex_table,\"a\"\n" \ 243 ".section __ex_table,\"a\"\n" \
@@ -247,7 +247,7 @@ __asm__ __volatile__("1: stq %r2,%1\n" \
247 : "=r"(__pu_err) \ 247 : "=r"(__pu_err) \
248 : "m" (__m(addr)), "rJ" (x), "0"(__pu_err)) 248 : "m" (__m(addr)), "rJ" (x), "0"(__pu_err))
249 249
250#define __put_user_32(x,addr) \ 250#define __put_user_32(x, addr) \
251__asm__ __volatile__("1: stl %r2,%1\n" \ 251__asm__ __volatile__("1: stl %r2,%1\n" \
252 "2:\n" \ 252 "2:\n" \
253 ".section __ex_table,\"a\"\n" \ 253 ".section __ex_table,\"a\"\n" \
@@ -260,7 +260,7 @@ __asm__ __volatile__("1: stl %r2,%1\n" \
260#ifdef __alpha_bwx__ 260#ifdef __alpha_bwx__
261/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */ 261/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */
262 262
263#define __put_user_16(x,addr) \ 263#define __put_user_16(x, addr) \
264__asm__ __volatile__("1: stw %r2,%1\n" \ 264__asm__ __volatile__("1: stw %r2,%1\n" \
265 "2:\n" \ 265 "2:\n" \
266 ".section __ex_table,\"a\"\n" \ 266 ".section __ex_table,\"a\"\n" \
@@ -270,7 +270,7 @@ __asm__ __volatile__("1: stw %r2,%1\n" \
270 : "=r"(__pu_err) \ 270 : "=r"(__pu_err) \
271 : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) 271 : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
272 272
273#define __put_user_8(x,addr) \ 273#define __put_user_8(x, addr) \
274__asm__ __volatile__("1: stb %r2,%1\n" \ 274__asm__ __volatile__("1: stb %r2,%1\n" \
275 "2:\n" \ 275 "2:\n" \
276 ".section __ex_table,\"a\"\n" \ 276 ".section __ex_table,\"a\"\n" \
@@ -283,7 +283,7 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
283/* Unfortunately, we can't get an unaligned access trap for the sub-word 283/* Unfortunately, we can't get an unaligned access trap for the sub-word
284 write, so we have to do a general unaligned operation. */ 284 write, so we have to do a general unaligned operation. */
285 285
286#define __put_user_16(x,addr) \ 286#define __put_user_16(x, addr) \
287{ \ 287{ \
288 long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4; \ 288 long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4; \
289 __asm__ __volatile__( \ 289 __asm__ __volatile__( \
@@ -308,13 +308,13 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
308 " .long 4b - .\n" \ 308 " .long 4b - .\n" \
309 " lda $31, 5b-4b(%0)\n" \ 309 " lda $31, 5b-4b(%0)\n" \
310 ".previous" \ 310 ".previous" \
311 : "=r"(__pu_err), "=&r"(__pu_tmp1), \ 311 : "=r"(__pu_err), "=&r"(__pu_tmp1), \
312 "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \ 312 "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \
313 "=&r"(__pu_tmp4) \ 313 "=&r"(__pu_tmp4) \
314 : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \ 314 : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
315} 315}
316 316
317#define __put_user_8(x,addr) \ 317#define __put_user_8(x, addr) \
318{ \ 318{ \
319 long __pu_tmp1, __pu_tmp2; \ 319 long __pu_tmp1, __pu_tmp2; \
320 __asm__ __volatile__( \ 320 __asm__ __volatile__( \
@@ -330,7 +330,7 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
330 " .long 2b - .\n" \ 330 " .long 2b - .\n" \
331 " lda $31, 3b-2b(%0)\n" \ 331 " lda $31, 3b-2b(%0)\n" \
332 ".previous" \ 332 ".previous" \
333 : "=r"(__pu_err), \ 333 : "=r"(__pu_err), \
334 "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \ 334 "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \
335 : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \ 335 : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
336} 336}
@@ -366,7 +366,7 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len)
366 : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to) 366 : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
367 : __module_address(__copy_user) 367 : __module_address(__copy_user)
368 "0" (__cu_len), "1" (__cu_from), "2" (__cu_to) 368 "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
369 : "$1","$2","$3","$4","$5","$28","memory"); 369 : "$1", "$2", "$3", "$4", "$5", "$28", "memory");
370 370
371 return __cu_len; 371 return __cu_len;
372} 372}
@@ -379,15 +379,15 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali
379 return len; 379 return len;
380} 380}
381 381
382#define __copy_to_user(to,from,n) \ 382#define __copy_to_user(to, from, n) \
383({ \ 383({ \
384 __chk_user_ptr(to); \ 384 __chk_user_ptr(to); \
385 __copy_tofrom_user_nocheck((__force void *)(to),(from),(n)); \ 385 __copy_tofrom_user_nocheck((__force void *)(to), (from), (n)); \
386}) 386})
387#define __copy_from_user(to,from,n) \ 387#define __copy_from_user(to, from, n) \
388({ \ 388({ \
389 __chk_user_ptr(from); \ 389 __chk_user_ptr(from); \
390 __copy_tofrom_user_nocheck((to),(__force void *)(from),(n)); \ 390 __copy_tofrom_user_nocheck((to), (__force void *)(from), (n)); \
391}) 391})
392 392
393#define __copy_to_user_inatomic __copy_to_user 393#define __copy_to_user_inatomic __copy_to_user
@@ -418,7 +418,7 @@ __clear_user(void __user *to, long len)
418 : "=r"(__cl_len), "=r"(__cl_to) 418 : "=r"(__cl_len), "=r"(__cl_to)
419 : __module_address(__do_clear_user) 419 : __module_address(__do_clear_user)
420 "0"(__cl_len), "1"(__cl_to) 420 "0"(__cl_len), "1"(__cl_to)
421 : "$1","$2","$3","$4","$5","$28","memory"); 421 : "$1", "$2", "$3", "$4", "$5", "$28", "memory");
422 return __cl_len; 422 return __cl_len;
423} 423}
424 424
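
The uaccess.h hunks above are style-only (a space after each comma in the macro argument lists) plus new __force casts, so that sparse accepts the conversion from the raw unsigned long back to the user-visible type; the calling convention of the accessors is unchanged. A minimal usage sketch, with a hypothetical helper name (example_flip_bit is not part of this patch):

	#include <linux/types.h>
	#include <linux/errno.h>
	#include <linux/uaccess.h>

	/* Read a u32 from user space, flip a bit, write it back.
	 * get_user()/put_user() validate the pointer via access_ok()
	 * internally and return 0 on success or -EFAULT on a fault. */
	static long example_flip_bit(u32 __user *uptr)
	{
		u32 val;

		if (get_user(val, uptr))
			return -EFAULT;

		val ^= 0x1;

		if (put_user(val, uptr))
			return -EFAULT;

		return 0;
	}
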
diff --git a/arch/arc/boot/dts/abilis_tb10x.dtsi b/arch/arc/boot/dts/abilis_tb10x.dtsi
index a098d7c05e96..cfb5052239a1 100644
--- a/arch/arc/boot/dts/abilis_tb10x.dtsi
+++ b/arch/arc/boot/dts/abilis_tb10x.dtsi
@@ -112,7 +112,7 @@
112 chan_allocation_order = <0>; 112 chan_allocation_order = <0>;
113 chan_priority = <1>; 113 chan_priority = <1>;
114 block_size = <0x7ff>; 114 block_size = <0x7ff>;
115 data_width = <2 0 0 0>; 115 data_width = <2>;
116 clocks = <&ahb_clk>; 116 clocks = <&ahb_clk>;
117 clock-names = "hclk"; 117 clock-names = "hclk";
118 }; 118 };
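
The data_width change above (and the matching <3 3> values in spear13xx.dtsi further down) tracks the dw_dmac binding update: the property now carries exactly one cell per AHB master (see dma-masters) instead of a fixed four-entry array padded with zeros. As I read the snps-dma binding, each cell is a power-of-two exponent of the master's bus width; a sketch of that decoding (helper name hypothetical):

	/* Assumed encoding per the snps-dma binding text: value n means a
	 * bus width of 8 << n bits, so <2> above is one 32-bit master and
	 * <3 3> in spear13xx.dtsi is two 64-bit masters. */
	static unsigned int dw_data_width_bits(unsigned int n)
	{
		return 8u << n;	/* 0 -> 8, 1 -> 16, 2 -> 32, 3 -> 64 */
	}
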
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index 6cc25ed912ee..2c6248d9a9ef 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -195,6 +195,7 @@
195 195
196&usb0 { 196&usb0 {
197 status = "okay"; 197 status = "okay";
198 dr_mode = "peripheral";
198}; 199};
199 200
200&usb1 { 201&usb1 {
diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts
index f9a17e2ca8cb..0198f5a62b96 100644
--- a/arch/arm/boot/dts/am437x-idk-evm.dts
+++ b/arch/arm/boot/dts/am437x-idk-evm.dts
@@ -133,20 +133,6 @@
133 >; 133 >;
134 }; 134 };
135 135
136 i2c1_pins_default: i2c1_pins_default {
137 pinctrl-single,pins = <
138 0x15c (PIN_INPUT | SLEWCTRL_FAST | MUX_MODE2) /* spi0_cs0.i2c1_scl */
139 0x158 (PIN_INPUT | SLEWCTRL_FAST | MUX_MODE2) /* spi0_d1.i2c1_sda */
140 >;
141 };
142
143 i2c1_pins_sleep: i2c1_pins_sleep {
144 pinctrl-single,pins = <
145 0x15c (PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_cs0.i2c1_scl */
146 0x158 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_d1.i2c1_sda */
147 >;
148 };
149
150 mmc1_pins_default: pinmux_mmc1_pins_default { 136 mmc1_pins_default: pinmux_mmc1_pins_default {
151 pinctrl-single,pins = < 137 pinctrl-single,pins = <
152 0x100 (PIN_INPUT | MUX_MODE0) /* mmc0_clk.mmc0_clk */ 138 0x100 (PIN_INPUT | MUX_MODE0) /* mmc0_clk.mmc0_clk */
@@ -254,7 +240,7 @@
254 status = "okay"; 240 status = "okay";
255 pinctrl-names = "default", "sleep"; 241 pinctrl-names = "default", "sleep";
256 pinctrl-0 = <&i2c0_pins_default>; 242 pinctrl-0 = <&i2c0_pins_default>;
257 pinctrl-1 = <&i2c0_pins_default>; 243 pinctrl-1 = <&i2c0_pins_sleep>;
258 clock-frequency = <400000>; 244 clock-frequency = <400000>;
259 245
260 at24@50 { 246 at24@50 {
@@ -262,17 +248,10 @@
262 pagesize = <64>; 248 pagesize = <64>;
263 reg = <0x50>; 249 reg = <0x50>;
264 }; 250 };
265};
266
267&i2c1 {
268 status = "okay";
269 pinctrl-names = "default", "sleep";
270 pinctrl-0 = <&i2c1_pins_default>;
271 pinctrl-1 = <&i2c1_pins_default>;
272 clock-frequency = <400000>;
273 251
274 tps: tps62362@60 { 252 tps: tps62362@60 {
275 compatible = "ti,tps62362"; 253 compatible = "ti,tps62362";
254 reg = <0x60>;
276 regulator-name = "VDD_MPU"; 255 regulator-name = "VDD_MPU";
277 regulator-min-microvolt = <950000>; 256 regulator-min-microvolt = <950000>;
278 regulator-max-microvolt = <1330000>; 257 regulator-max-microvolt = <1330000>;
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 03750af3b49a..6463f9ef2b54 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -549,14 +549,6 @@
549 pinctrl-0 = <&usb1_pins>; 549 pinctrl-0 = <&usb1_pins>;
550}; 550};
551 551
552&omap_dwc3_1 {
553 extcon = <&extcon_usb1>;
554};
555
556&omap_dwc3_2 {
557 extcon = <&extcon_usb2>;
558};
559
560&usb2 { 552&usb2 {
561 dr_mode = "peripheral"; 553 dr_mode = "peripheral";
562}; 554};
diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi
index 5126f9e77a98..ff5fb6ab0b97 100644
--- a/arch/arm/boot/dts/bcm-cygnus.dtsi
+++ b/arch/arm/boot/dts/bcm-cygnus.dtsi
@@ -70,6 +70,26 @@
70 }; 70 };
71 }; 71 };
72 72
73 i2c0: i2c@18008000 {
74 compatible = "brcm,cygnus-iproc-i2c", "brcm,iproc-i2c";
75 reg = <0x18008000 0x100>;
76 #address-cells = <1>;
77 #size-cells = <0>;
78 interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
79 clock-frequency = <100000>;
80 status = "disabled";
81 };
82
83 i2c1: i2c@1800b000 {
84 compatible = "brcm,cygnus-iproc-i2c", "brcm,iproc-i2c";
85 reg = <0x1800b000 0x100>;
86 #address-cells = <1>;
87 #size-cells = <0>;
88 interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
89 clock-frequency = <100000>;
90 status = "disabled";
91 };
92
73 uart0: serial@18020000 { 93 uart0: serial@18020000 {
74 compatible = "snps,dw-apb-uart"; 94 compatible = "snps,dw-apb-uart";
75 reg = <0x18020000 0x100>; 95 reg = <0x18020000 0x100>;
diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi
index d2d8e94e0aa2..f46329c8ad75 100644
--- a/arch/arm/boot/dts/bcm63138.dtsi
+++ b/arch/arm/boot/dts/bcm63138.dtsi
@@ -66,8 +66,9 @@
66 reg = <0x1d000 0x1000>; 66 reg = <0x1d000 0x1000>;
67 cache-unified; 67 cache-unified;
68 cache-level = <2>; 68 cache-level = <2>;
69 cache-sets = <16>; 69 cache-size = <524288>;
70 cache-size = <0x80000>; 70 cache-sets = <1024>;
71 cache-line-size = <32>;
71 interrupts = <GIC_PPI 0 IRQ_TYPE_LEVEL_HIGH>; 72 interrupts = <GIC_PPI 0 IRQ_TYPE_LEVEL_HIGH>;
72 }; 73 };
73 74
diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts
index 857d0289ad4d..d3a29c1b8417 100644
--- a/arch/arm/boot/dts/dm8168-evm.dts
+++ b/arch/arm/boot/dts/dm8168-evm.dts
@@ -35,6 +35,18 @@
35 DM816X_IOPAD(0x0aac, PIN_INPUT | MUX_MODE0) /* SPI_D1 */ 35 DM816X_IOPAD(0x0aac, PIN_INPUT | MUX_MODE0) /* SPI_D1 */
36 >; 36 >;
37 }; 37 };
38
39 usb0_pins: pinmux_usb0_pins {
40 pinctrl-single,pins = <
41 DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */
42 >;
43 };
44
 45 usb1_pins: pinmux_usb1_pins {
46 pinctrl-single,pins = <
47 DM816X_IOPAD(0x0d04, MUX_MODE0) /* USB1_DRVVBUS */
48 >;
49 };
38}; 50};
39 51
40&i2c1 { 52&i2c1 {
@@ -127,3 +139,16 @@
127&mmc1 { 139&mmc1 {
128 vmmc-supply = <&vmmcsd_fixed>; 140 vmmc-supply = <&vmmcsd_fixed>;
129}; 141};
142
143/* At least dm8168-evm rev c doesn't support multipoint; later revisions may */
144&usb0 {
145 pinctrl-names = "default";
146 pinctrl-0 = <&usb0_pins>;
147 mentor,multipoint = <0>;
148};
149
150&usb1 {
151 pinctrl-names = "default";
152 pinctrl-0 = <&usb1_pins>;
153 mentor,multipoint = <0>;
154};
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index d98d0f7de380..3c97b5f2addc 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -97,10 +97,31 @@
97 97
98 /* Device Configuration Registers */ 98 /* Device Configuration Registers */
99 scm_conf: syscon@600 { 99 scm_conf: syscon@600 {
100 compatible = "syscon"; 100 compatible = "syscon", "simple-bus";
101 reg = <0x600 0x110>; 101 reg = <0x600 0x110>;
102 #address-cells = <1>; 102 #address-cells = <1>;
103 #size-cells = <1>; 103 #size-cells = <1>;
104 ranges = <0 0x600 0x110>;
105
106 usb_phy0: usb-phy@20 {
107 compatible = "ti,dm8168-usb-phy";
108 reg = <0x20 0x8>;
109 reg-names = "phy";
110 clocks = <&main_fapll 6>;
111 clock-names = "refclk";
112 #phy-cells = <0>;
113 syscon = <&scm_conf>;
114 };
115
116 usb_phy1: usb-phy@28 {
117 compatible = "ti,dm8168-usb-phy";
118 reg = <0x28 0x8>;
119 reg-names = "phy";
120 clocks = <&main_fapll 6>;
121 clock-names = "refclk";
122 #phy-cells = <0>;
123 syscon = <&scm_conf>;
124 };
104 }; 125 };
105 126
106 scrm_clocks: clocks { 127 scrm_clocks: clocks {
@@ -357,7 +378,10 @@
357 reg-names = "mc", "control"; 378 reg-names = "mc", "control";
358 interrupts = <18>; 379 interrupts = <18>;
359 interrupt-names = "mc"; 380 interrupt-names = "mc";
360 dr_mode = "otg"; 381 dr_mode = "host";
382 interface-type = <0>;
383 phys = <&usb_phy0>;
384 phy-names = "usb2-phy";
361 mentor,multipoint = <1>; 385 mentor,multipoint = <1>;
362 mentor,num-eps = <16>; 386 mentor,num-eps = <16>;
363 mentor,ram-bits = <12>; 387 mentor,ram-bits = <12>;
@@ -366,13 +390,15 @@
366 390
367 usb1: usb@47401800 { 391 usb1: usb@47401800 {
368 compatible = "ti,musb-am33xx"; 392 compatible = "ti,musb-am33xx";
369 status = "disabled";
370 reg = <0x47401c00 0x400 393 reg = <0x47401c00 0x400
371 0x47401800 0x200>; 394 0x47401800 0x200>;
372 reg-names = "mc", "control"; 395 reg-names = "mc", "control";
373 interrupts = <19>; 396 interrupts = <19>;
374 interrupt-names = "mc"; 397 interrupt-names = "mc";
375 dr_mode = "otg"; 398 dr_mode = "host";
399 interface-type = <0>;
400 phys = <&usb_phy1>;
401 phy-names = "usb2-phy";
376 mentor,multipoint = <1>; 402 mentor,multipoint = <1>;
377 mentor,num-eps = <16>; 403 mentor,num-eps = <16>;
378 mentor,ram-bits = <12>; 404 mentor,ram-bits = <12>;
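
With the hunks above, both MUSB instances gain explicit references to the new syscon-based PHY nodes (phys/phy-names) and switch dr_mode from otg to host. On the driver side the generic PHY framework resolves the named reference; a minimal consumer sketch (example_attach_usb_phy is a hypothetical name, the phy_* calls are the standard API):

	#include <linux/err.h>
	#include <linux/phy/phy.h>

	/* Look up the PHY by the name given in phy-names, then bring it
	 * up.  phy_init()/phy_power_on() return 0 or a negative errno. */
	static int example_attach_usb_phy(struct device *dev)
	{
		struct phy *phy;
		int ret;

		phy = devm_phy_get(dev, "usb2-phy");
		if (IS_ERR(phy))
			return PTR_ERR(phy);

		ret = phy_init(phy);
		if (ret)
			return ret;

		return phy_power_on(phy);
	}
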
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 746cddb1b8f5..3290a96ba586 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -543,14 +543,6 @@
543 }; 543 };
544}; 544};
545 545
546&omap_dwc3_1 {
547 extcon = <&extcon_usb1>;
548};
549
550&omap_dwc3_2 {
551 extcon = <&extcon_usb2>;
552};
553
554&usb1 { 546&usb1 {
555 dr_mode = "peripheral"; 547 dr_mode = "peripheral";
556 pinctrl-names = "default"; 548 pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 5827fedafd43..127608d79033 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -249,8 +249,8 @@
249 <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, 249 <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
250 <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; 250 <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
251 #dma-cells = <1>; 251 #dma-cells = <1>;
252 #dma-channels = <32>; 252 dma-channels = <32>;
253 #dma-requests = <127>; 253 dma-requests = <127>;
254 }; 254 };
255 255
256 gpio1: gpio@4ae10000 { 256 gpio1: gpio@4ae10000 {
@@ -1090,8 +1090,8 @@
1090 <0x4A096800 0x40>; /* pll_ctrl */ 1090 <0x4A096800 0x40>; /* pll_ctrl */
1091 reg-names = "phy_rx", "phy_tx", "pll_ctrl"; 1091 reg-names = "phy_rx", "phy_tx", "pll_ctrl";
1092 ctrl-module = <&omap_control_sata>; 1092 ctrl-module = <&omap_control_sata>;
1093 clocks = <&sys_clkin1>; 1093 clocks = <&sys_clkin1>, <&sata_ref_clk>;
1094 clock-names = "sysclk"; 1094 clock-names = "sysclk", "refclk";
1095 #phy-cells = <0>; 1095 #phy-cells = <0>;
1096 }; 1096 };
1097 1097
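
The #dma-channels/#dma-requests rename here (and in the omap2/omap3/omap4/omap5 dtsi hunks below) drops the leading '#', which devicetree convention reserves for *-cells specifier properties; these are plain capability integers. A sketch of the consumer side, assuming the usual of_property helpers (function name hypothetical):

	#include <linux/of.h>

	/* Read the controller capabilities, falling back to defaults when
	 * the DT omits them; of_property_read_u32() leaves the output
	 * untouched and returns a negative errno on a missing property. */
	static void example_read_dma_caps(struct device_node *np,
					  u32 *channels, u32 *requests)
	{
		*channels = 32;
		*requests = 127;
		of_property_read_u32(np, "dma-channels", channels);
		of_property_read_u32(np, "dma-requests", requests);
	}
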
diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts
index 4d8711713610..e0264d0bf7b9 100644
--- a/arch/arm/boot/dts/dra72-evm.dts
+++ b/arch/arm/boot/dts/dra72-evm.dts
@@ -380,14 +380,6 @@
380 phy-supply = <&ldo4_reg>; 380 phy-supply = <&ldo4_reg>;
381}; 381};
382 382
383&omap_dwc3_1 {
384 extcon = <&extcon_usb1>;
385};
386
387&omap_dwc3_2 {
388 extcon = <&extcon_usb2>;
389};
390
391&usb1 { 383&usb1 {
392 dr_mode = "peripheral"; 384 dr_mode = "peripheral";
393 pinctrl-names = "default"; 385 pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi
index 59d1c297bb30..578fa2a54dce 100644
--- a/arch/arm/boot/dts/omap2.dtsi
+++ b/arch/arm/boot/dts/omap2.dtsi
@@ -87,8 +87,8 @@
87 <14>, 87 <14>,
88 <15>; 88 <15>;
89 #dma-cells = <1>; 89 #dma-cells = <1>;
90 #dma-channels = <32>; 90 dma-channels = <32>;
91 #dma-requests = <64>; 91 dma-requests = <64>;
92 }; 92 };
93 93
94 i2c1: i2c@48070000 { 94 i2c1: i2c@48070000 {
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 60403273f83e..db80f9d376fa 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -16,6 +16,13 @@
16 model = "Nokia N900"; 16 model = "Nokia N900";
17 compatible = "nokia,omap3-n900", "ti,omap3430", "ti,omap3"; 17 compatible = "nokia,omap3-n900", "ti,omap3430", "ti,omap3";
18 18
19 aliases {
20 i2c0;
21 i2c1 = &i2c1;
22 i2c2 = &i2c2;
23 i2c3 = &i2c3;
24 };
25
19 cpus { 26 cpus {
20 cpu@0 { 27 cpu@0 {
21 cpu0-supply = <&vcc>; 28 cpu0-supply = <&vcc>;
@@ -704,7 +711,7 @@
704 compatible = "smsc,lan91c94"; 711 compatible = "smsc,lan91c94";
705 interrupt-parent = <&gpio2>; 712 interrupt-parent = <&gpio2>;
706 interrupts = <22 IRQ_TYPE_LEVEL_HIGH>; /* gpio54 */ 713 interrupts = <22 IRQ_TYPE_LEVEL_HIGH>; /* gpio54 */
707 reg = <1 0x300 0xf>; /* 16 byte IO range at offset 0x300 */ 714 reg = <1 0 0xf>; /* 16 byte IO range */
708 bank-width = <2>; 715 bank-width = <2>;
709 pinctrl-names = "default"; 716 pinctrl-names = "default";
710 pinctrl-0 = <&ethernet_pins>; 717 pinctrl-0 = <&ethernet_pins>;
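
The aliases block added above pins the N900's I2C bus numbering: the populated entries keep buses 1-3 at their historical numbers regardless of probe order, and the empty i2c0 entry appears intended to keep index 0 unused. The i2c core derives the number through the alias machinery; a sketch using of_alias_get_id() (helper name hypothetical):

	#include <linux/errno.h>
	#include <linux/of.h>

	/* Map a controller's DT node to its fixed bus number.
	 * of_alias_get_id() returns the N from "i2cN = &node;" or a
	 * negative errno when no alias points at this node. */
	static int example_i2c_bus_number(struct device_node *np)
	{
		int id = of_alias_get_id(np, "i2c");

		return id >= 0 ? id : -ENODEV;
	}
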
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 01b71111bd55..f4f78c40b564 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -155,8 +155,8 @@
155 <14>, 155 <14>,
156 <15>; 156 <15>;
157 #dma-cells = <1>; 157 #dma-cells = <1>;
158 #dma-channels = <32>; 158 dma-channels = <32>;
159 #dma-requests = <96>; 159 dma-requests = <96>;
160 }; 160 };
161 161
162 omap3_pmx_core: pinmux@48002030 { 162 omap3_pmx_core: pinmux@48002030 {
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 074147cebae4..87401d9f4d8b 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -223,8 +223,8 @@
223 <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, 223 <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
224 <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; 224 <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
225 #dma-cells = <1>; 225 #dma-cells = <1>;
226 #dma-channels = <32>; 226 dma-channels = <32>;
227 #dma-requests = <127>; 227 dma-requests = <127>;
228 }; 228 };
229 229
230 gpio1: gpio@4a310000 { 230 gpio1: gpio@4a310000 {
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index b321fdf42c9f..ddff674bd05e 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -238,8 +238,8 @@
238 <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, 238 <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
239 <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; 239 <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
240 #dma-cells = <1>; 240 #dma-cells = <1>;
241 #dma-channels = <32>; 241 dma-channels = <32>;
242 #dma-requests = <127>; 242 dma-requests = <127>;
243 }; 243 };
244 244
245 gpio1: gpio@4ae10000 { 245 gpio1: gpio@4ae10000 {
@@ -929,8 +929,8 @@
929 <0x4A096800 0x40>; /* pll_ctrl */ 929 <0x4A096800 0x40>; /* pll_ctrl */
930 reg-names = "phy_rx", "phy_tx", "pll_ctrl"; 930 reg-names = "phy_rx", "phy_tx", "pll_ctrl";
931 ctrl-module = <&omap_control_sata>; 931 ctrl-module = <&omap_control_sata>;
932 clocks = <&sys_clkin>; 932 clocks = <&sys_clkin>, <&sata_ref_clk>;
933 clock-names = "sysclk"; 933 clock-names = "sysclk", "refclk";
934 #phy-cells = <0>; 934 #phy-cells = <0>;
935 }; 935 };
936 }; 936 };
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
index a6eb5436d26d..40accc87e3a2 100644
--- a/arch/arm/boot/dts/spear13xx.dtsi
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -117,7 +117,7 @@
117 chan_priority = <1>; 117 chan_priority = <1>;
118 block_size = <0xfff>; 118 block_size = <0xfff>;
119 dma-masters = <2>; 119 dma-masters = <2>;
120 data_width = <3 3 0 0>; 120 data_width = <3 3>;
121 }; 121 };
122 122
123 dma@eb000000 { 123 dma@eb000000 {
@@ -133,7 +133,7 @@
133 chan_allocation_order = <1>; 133 chan_allocation_order = <1>;
134 chan_priority = <1>; 134 chan_priority = <1>;
135 block_size = <0xfff>; 135 block_size = <0xfff>;
136 data_width = <3 3 0 0>; 136 data_width = <3 3>;
137 }; 137 };
138 138
139 fsmc: flash@b0000000 { 139 fsmc: flash@b0000000 {
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 8ca3c1a2063d..5c2925831f20 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -294,35 +294,43 @@
294 }; 294 };
295 295
296 mmc0_clk: clk@01c20088 { 296 mmc0_clk: clk@01c20088 {
297 #clock-cells = <0>; 297 #clock-cells = <1>;
298 compatible = "allwinner,sun4i-a10-mod0-clk"; 298 compatible = "allwinner,sun4i-a10-mmc-clk";
299 reg = <0x01c20088 0x4>; 299 reg = <0x01c20088 0x4>;
300 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; 300 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
301 clock-output-names = "mmc0"; 301 clock-output-names = "mmc0",
302 "mmc0_output",
303 "mmc0_sample";
302 }; 304 };
303 305
304 mmc1_clk: clk@01c2008c { 306 mmc1_clk: clk@01c2008c {
305 #clock-cells = <0>; 307 #clock-cells = <1>;
306 compatible = "allwinner,sun4i-a10-mod0-clk"; 308 compatible = "allwinner,sun4i-a10-mmc-clk";
307 reg = <0x01c2008c 0x4>; 309 reg = <0x01c2008c 0x4>;
308 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; 310 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
309 clock-output-names = "mmc1"; 311 clock-output-names = "mmc1",
312 "mmc1_output",
313 "mmc1_sample";
310 }; 314 };
311 315
312 mmc2_clk: clk@01c20090 { 316 mmc2_clk: clk@01c20090 {
313 #clock-cells = <0>; 317 #clock-cells = <1>;
314 compatible = "allwinner,sun4i-a10-mod0-clk"; 318 compatible = "allwinner,sun4i-a10-mmc-clk";
315 reg = <0x01c20090 0x4>; 319 reg = <0x01c20090 0x4>;
316 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; 320 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
317 clock-output-names = "mmc2"; 321 clock-output-names = "mmc2",
322 "mmc2_output",
323 "mmc2_sample";
318 }; 324 };
319 325
320 mmc3_clk: clk@01c20094 { 326 mmc3_clk: clk@01c20094 {
321 #clock-cells = <0>; 327 #clock-cells = <1>;
322 compatible = "allwinner,sun4i-a10-mod0-clk"; 328 compatible = "allwinner,sun4i-a10-mmc-clk";
323 reg = <0x01c20094 0x4>; 329 reg = <0x01c20094 0x4>;
324 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; 330 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
325 clock-output-names = "mmc3"; 331 clock-output-names = "mmc3",
332 "mmc3_output",
333 "mmc3_sample";
326 }; 334 };
327 335
328 ts_clk: clk@01c20098 { 336 ts_clk: clk@01c20098 {
@@ -468,8 +476,14 @@
468 mmc0: mmc@01c0f000 { 476 mmc0: mmc@01c0f000 {
469 compatible = "allwinner,sun4i-a10-mmc"; 477 compatible = "allwinner,sun4i-a10-mmc";
470 reg = <0x01c0f000 0x1000>; 478 reg = <0x01c0f000 0x1000>;
471 clocks = <&ahb_gates 8>, <&mmc0_clk>; 479 clocks = <&ahb_gates 8>,
472 clock-names = "ahb", "mmc"; 480 <&mmc0_clk 0>,
481 <&mmc0_clk 1>,
482 <&mmc0_clk 2>;
483 clock-names = "ahb",
484 "mmc",
485 "output",
486 "sample";
473 interrupts = <32>; 487 interrupts = <32>;
474 status = "disabled"; 488 status = "disabled";
475 }; 489 };
@@ -477,8 +491,14 @@
477 mmc1: mmc@01c10000 { 491 mmc1: mmc@01c10000 {
478 compatible = "allwinner,sun4i-a10-mmc"; 492 compatible = "allwinner,sun4i-a10-mmc";
479 reg = <0x01c10000 0x1000>; 493 reg = <0x01c10000 0x1000>;
480 clocks = <&ahb_gates 9>, <&mmc1_clk>; 494 clocks = <&ahb_gates 9>,
481 clock-names = "ahb", "mmc"; 495 <&mmc1_clk 0>,
496 <&mmc1_clk 1>,
497 <&mmc1_clk 2>;
498 clock-names = "ahb",
499 "mmc",
500 "output",
501 "sample";
482 interrupts = <33>; 502 interrupts = <33>;
483 status = "disabled"; 503 status = "disabled";
484 }; 504 };
@@ -486,8 +506,14 @@
486 mmc2: mmc@01c11000 { 506 mmc2: mmc@01c11000 {
487 compatible = "allwinner,sun4i-a10-mmc"; 507 compatible = "allwinner,sun4i-a10-mmc";
488 reg = <0x01c11000 0x1000>; 508 reg = <0x01c11000 0x1000>;
489 clocks = <&ahb_gates 10>, <&mmc2_clk>; 509 clocks = <&ahb_gates 10>,
490 clock-names = "ahb", "mmc"; 510 <&mmc2_clk 0>,
511 <&mmc2_clk 1>,
512 <&mmc2_clk 2>;
513 clock-names = "ahb",
514 "mmc",
515 "output",
516 "sample";
491 interrupts = <34>; 517 interrupts = <34>;
492 status = "disabled"; 518 status = "disabled";
493 }; 519 };
@@ -495,8 +521,14 @@
495 mmc3: mmc@01c12000 { 521 mmc3: mmc@01c12000 {
496 compatible = "allwinner,sun4i-a10-mmc"; 522 compatible = "allwinner,sun4i-a10-mmc";
497 reg = <0x01c12000 0x1000>; 523 reg = <0x01c12000 0x1000>;
498 clocks = <&ahb_gates 11>, <&mmc3_clk>; 524 clocks = <&ahb_gates 11>,
499 clock-names = "ahb", "mmc"; 525 <&mmc3_clk 0>,
526 <&mmc3_clk 1>,
527 <&mmc3_clk 2>;
528 clock-names = "ahb",
529 "mmc",
530 "output",
531 "sample";
500 interrupts = <35>; 532 interrupts = <35>;
501 status = "disabled"; 533 status = "disabled";
502 }; 534 };
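
The clock hunks above convert each mod0-style MMC clock into a three-output mmc clock (#clock-cells = <1>): the module clock plus separate output and sample phase outputs, which the mmc nodes now consume by name alongside the AHB gate. A consumer-side sketch (example_get_mmc_clocks is hypothetical; devm_clk_get()/clk_prepare_enable() are the standard API):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/errno.h>

	/* Fetch the four clocks named by the new binding and enable the
	 * module clock; the output/sample pair would typically be tuned
	 * with clk_set_phase() for higher-speed modes. */
	static int example_get_mmc_clocks(struct device *dev)
	{
		struct clk *ahb, *mod, *output, *sample;

		ahb    = devm_clk_get(dev, "ahb");
		mod    = devm_clk_get(dev, "mmc");
		output = devm_clk_get(dev, "output");
		sample = devm_clk_get(dev, "sample");
		if (IS_ERR(ahb) || IS_ERR(mod) ||
		    IS_ERR(output) || IS_ERR(sample))
			return -ENODEV;

		return clk_prepare_enable(mod);
	}
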
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index 905f84d141f0..2fd8988f310c 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -218,27 +218,33 @@
218 }; 218 };
219 219
220 mmc0_clk: clk@01c20088 { 220 mmc0_clk: clk@01c20088 {
221 #clock-cells = <0>; 221 #clock-cells = <1>;
222 compatible = "allwinner,sun4i-a10-mod0-clk"; 222 compatible = "allwinner,sun4i-a10-mmc-clk";
223 reg = <0x01c20088 0x4>; 223 reg = <0x01c20088 0x4>;
224 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; 224 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
225 clock-output-names = "mmc0"; 225 clock-output-names = "mmc0",
226 "mmc0_output",
227 "mmc0_sample";
226 }; 228 };
227 229
228 mmc1_clk: clk@01c2008c { 230 mmc1_clk: clk@01c2008c {
229 #clock-cells = <0>; 231 #clock-cells = <1>;
230 compatible = "allwinner,sun4i-a10-mod0-clk"; 232 compatible = "allwinner,sun4i-a10-mmc-clk";
231 reg = <0x01c2008c 0x4>; 233 reg = <0x01c2008c 0x4>;
232 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; 234 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
233 clock-output-names = "mmc1"; 235 clock-output-names = "mmc1",
236 "mmc1_output",
237 "mmc1_sample";
234 }; 238 };
235 239
236 mmc2_clk: clk@01c20090 { 240 mmc2_clk: clk@01c20090 {
237 #clock-cells = <0>; 241 #clock-cells = <1>;
238 compatible = "allwinner,sun4i-a10-mod0-clk"; 242 compatible = "allwinner,sun4i-a10-mmc-clk";
239 reg = <0x01c20090 0x4>; 243 reg = <0x01c20090 0x4>;
240 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; 244 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
241 clock-output-names = "mmc2"; 245 clock-output-names = "mmc2",
246 "mmc2_output",
247 "mmc2_sample";
242 }; 248 };
243 249
244 ts_clk: clk@01c20098 { 250 ts_clk: clk@01c20098 {
@@ -368,8 +374,14 @@
368 mmc0: mmc@01c0f000 { 374 mmc0: mmc@01c0f000 {
369 compatible = "allwinner,sun5i-a13-mmc"; 375 compatible = "allwinner,sun5i-a13-mmc";
370 reg = <0x01c0f000 0x1000>; 376 reg = <0x01c0f000 0x1000>;
371 clocks = <&ahb_gates 8>, <&mmc0_clk>; 377 clocks = <&ahb_gates 8>,
372 clock-names = "ahb", "mmc"; 378 <&mmc0_clk 0>,
379 <&mmc0_clk 1>,
380 <&mmc0_clk 2>;
381 clock-names = "ahb",
382 "mmc",
383 "output",
384 "sample";
373 interrupts = <32>; 385 interrupts = <32>;
374 status = "disabled"; 386 status = "disabled";
375 }; 387 };
@@ -377,8 +389,14 @@
377 mmc1: mmc@01c10000 { 389 mmc1: mmc@01c10000 {
378 compatible = "allwinner,sun5i-a13-mmc"; 390 compatible = "allwinner,sun5i-a13-mmc";
379 reg = <0x01c10000 0x1000>; 391 reg = <0x01c10000 0x1000>;
380 clocks = <&ahb_gates 9>, <&mmc1_clk>; 392 clocks = <&ahb_gates 9>,
381 clock-names = "ahb", "mmc"; 393 <&mmc1_clk 0>,
394 <&mmc1_clk 1>,
395 <&mmc1_clk 2>;
396 clock-names = "ahb",
397 "mmc",
398 "output",
399 "sample";
382 interrupts = <33>; 400 interrupts = <33>;
383 status = "disabled"; 401 status = "disabled";
384 }; 402 };
@@ -386,8 +404,14 @@
386 mmc2: mmc@01c11000 { 404 mmc2: mmc@01c11000 {
387 compatible = "allwinner,sun5i-a13-mmc"; 405 compatible = "allwinner,sun5i-a13-mmc";
388 reg = <0x01c11000 0x1000>; 406 reg = <0x01c11000 0x1000>;
389 clocks = <&ahb_gates 10>, <&mmc2_clk>; 407 clocks = <&ahb_gates 10>,
390 clock-names = "ahb", "mmc"; 408 <&mmc2_clk 0>,
409 <&mmc2_clk 1>,
410 <&mmc2_clk 2>;
411 clock-names = "ahb",
412 "mmc",
413 "output",
414 "sample";
391 interrupts = <34>; 415 interrupts = <34>;
392 status = "disabled"; 416 status = "disabled";
393 }; 417 };
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index 4910393d1b09..f8818f1edbbe 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -257,27 +257,33 @@
257 }; 257 };
258 258
259 mmc0_clk: clk@01c20088 { 259 mmc0_clk: clk@01c20088 {
260 #clock-cells = <0>; 260 #clock-cells = <1>;
261 compatible = "allwinner,sun4i-a10-mod0-clk"; 261 compatible = "allwinner,sun4i-a10-mmc-clk";
262 reg = <0x01c20088 0x4>; 262 reg = <0x01c20088 0x4>;
263 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; 263 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
264 clock-output-names = "mmc0"; 264 clock-output-names = "mmc0",
265 "mmc0_output",
266 "mmc0_sample";
265 }; 267 };
266 268
267 mmc1_clk: clk@01c2008c { 269 mmc1_clk: clk@01c2008c {
268 #clock-cells = <0>; 270 #clock-cells = <1>;
269 compatible = "allwinner,sun4i-a10-mod0-clk"; 271 compatible = "allwinner,sun4i-a10-mmc-clk";
270 reg = <0x01c2008c 0x4>; 272 reg = <0x01c2008c 0x4>;
271 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; 273 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
272 clock-output-names = "mmc1"; 274 clock-output-names = "mmc1",
275 "mmc1_output",
276 "mmc1_sample";
273 }; 277 };
274 278
275 mmc2_clk: clk@01c20090 { 279 mmc2_clk: clk@01c20090 {
276 #clock-cells = <0>; 280 #clock-cells = <1>;
277 compatible = "allwinner,sun4i-a10-mod0-clk"; 281 compatible = "allwinner,sun4i-a10-mmc-clk";
278 reg = <0x01c20090 0x4>; 282 reg = <0x01c20090 0x4>;
279 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; 283 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
280 clock-output-names = "mmc2"; 284 clock-output-names = "mmc2",
285 "mmc2_output",
286 "mmc2_sample";
281 }; 287 };
282 288
283 ts_clk: clk@01c20098 { 289 ts_clk: clk@01c20098 {
@@ -391,8 +397,14 @@
391 mmc0: mmc@01c0f000 { 397 mmc0: mmc@01c0f000 {
392 compatible = "allwinner,sun5i-a13-mmc"; 398 compatible = "allwinner,sun5i-a13-mmc";
393 reg = <0x01c0f000 0x1000>; 399 reg = <0x01c0f000 0x1000>;
394 clocks = <&ahb_gates 8>, <&mmc0_clk>; 400 clocks = <&ahb_gates 8>,
395 clock-names = "ahb", "mmc"; 401 <&mmc0_clk 0>,
402 <&mmc0_clk 1>,
403 <&mmc0_clk 2>;
404 clock-names = "ahb",
405 "mmc",
406 "output",
407 "sample";
396 interrupts = <32>; 408 interrupts = <32>;
397 status = "disabled"; 409 status = "disabled";
398 }; 410 };
@@ -400,8 +412,14 @@
400 mmc2: mmc@01c11000 { 412 mmc2: mmc@01c11000 {
401 compatible = "allwinner,sun5i-a13-mmc"; 413 compatible = "allwinner,sun5i-a13-mmc";
402 reg = <0x01c11000 0x1000>; 414 reg = <0x01c11000 0x1000>;
403 clocks = <&ahb_gates 10>, <&mmc2_clk>; 415 clocks = <&ahb_gates 10>,
404 clock-names = "ahb", "mmc"; 416 <&mmc2_clk 0>,
417 <&mmc2_clk 1>,
418 <&mmc2_clk 2>;
419 clock-names = "ahb",
420 "mmc",
421 "output",
422 "sample";
405 interrupts = <34>; 423 interrupts = <34>;
406 status = "disabled"; 424 status = "disabled";
407 }; 425 };
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 47e557656993..fa2f403ccf28 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -190,19 +190,11 @@
190 clock-output-names = "axi"; 190 clock-output-names = "axi";
191 }; 191 };
192 192
193 ahb1_mux: ahb1_mux@01c20054 {
194 #clock-cells = <0>;
195 compatible = "allwinner,sun6i-a31-ahb1-mux-clk";
196 reg = <0x01c20054 0x4>;
197 clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
198 clock-output-names = "ahb1_mux";
199 };
200
201 ahb1: ahb1@01c20054 { 193 ahb1: ahb1@01c20054 {
202 #clock-cells = <0>; 194 #clock-cells = <0>;
203 compatible = "allwinner,sun4i-a10-ahb-clk"; 195 compatible = "allwinner,sun6i-a31-ahb1-clk";
204 reg = <0x01c20054 0x4>; 196 reg = <0x01c20054 0x4>;
205 clocks = <&ahb1_mux>; 197 clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
206 clock-output-names = "ahb1"; 198 clock-output-names = "ahb1";
207 }; 199 };
208 200
@@ -265,35 +257,43 @@
265 }; 257 };
266 258
267 mmc0_clk: clk@01c20088 { 259 mmc0_clk: clk@01c20088 {
268 #clock-cells = <0>; 260 #clock-cells = <1>;
269 compatible = "allwinner,sun4i-a10-mod0-clk"; 261 compatible = "allwinner,sun4i-a10-mmc-clk";
270 reg = <0x01c20088 0x4>; 262 reg = <0x01c20088 0x4>;
271 clocks = <&osc24M>, <&pll6 0>; 263 clocks = <&osc24M>, <&pll6 0>;
272 clock-output-names = "mmc0"; 264 clock-output-names = "mmc0",
265 "mmc0_output",
266 "mmc0_sample";
273 }; 267 };
274 268
275 mmc1_clk: clk@01c2008c { 269 mmc1_clk: clk@01c2008c {
276 #clock-cells = <0>; 270 #clock-cells = <1>;
277 compatible = "allwinner,sun4i-a10-mod0-clk"; 271 compatible = "allwinner,sun4i-a10-mmc-clk";
278 reg = <0x01c2008c 0x4>; 272 reg = <0x01c2008c 0x4>;
279 clocks = <&osc24M>, <&pll6 0>; 273 clocks = <&osc24M>, <&pll6 0>;
280 clock-output-names = "mmc1"; 274 clock-output-names = "mmc1",
275 "mmc1_output",
276 "mmc1_sample";
281 }; 277 };
282 278
283 mmc2_clk: clk@01c20090 { 279 mmc2_clk: clk@01c20090 {
284 #clock-cells = <0>; 280 #clock-cells = <1>;
285 compatible = "allwinner,sun4i-a10-mod0-clk"; 281 compatible = "allwinner,sun4i-a10-mmc-clk";
286 reg = <0x01c20090 0x4>; 282 reg = <0x01c20090 0x4>;
287 clocks = <&osc24M>, <&pll6 0>; 283 clocks = <&osc24M>, <&pll6 0>;
288 clock-output-names = "mmc2"; 284 clock-output-names = "mmc2",
285 "mmc2_output",
286 "mmc2_sample";
289 }; 287 };
290 288
291 mmc3_clk: clk@01c20094 { 289 mmc3_clk: clk@01c20094 {
292 #clock-cells = <0>; 290 #clock-cells = <1>;
293 compatible = "allwinner,sun4i-a10-mod0-clk"; 291 compatible = "allwinner,sun4i-a10-mmc-clk";
294 reg = <0x01c20094 0x4>; 292 reg = <0x01c20094 0x4>;
295 clocks = <&osc24M>, <&pll6 0>; 293 clocks = <&osc24M>, <&pll6 0>;
296 clock-output-names = "mmc3"; 294 clock-output-names = "mmc3",
295 "mmc3_output",
296 "mmc3_sample";
297 }; 297 };
298 298
299 spi0_clk: clk@01c200a0 { 299 spi0_clk: clk@01c200a0 {
@@ -383,15 +383,21 @@
383 #dma-cells = <1>; 383 #dma-cells = <1>;
384 384
385 /* DMA controller requires AHB1 clocked from PLL6 */ 385 /* DMA controller requires AHB1 clocked from PLL6 */
386 assigned-clocks = <&ahb1_mux>; 386 assigned-clocks = <&ahb1>;
387 assigned-clock-parents = <&pll6 0>; 387 assigned-clock-parents = <&pll6 0>;
388 }; 388 };
389 389
390 mmc0: mmc@01c0f000 { 390 mmc0: mmc@01c0f000 {
391 compatible = "allwinner,sun5i-a13-mmc"; 391 compatible = "allwinner,sun5i-a13-mmc";
392 reg = <0x01c0f000 0x1000>; 392 reg = <0x01c0f000 0x1000>;
393 clocks = <&ahb1_gates 8>, <&mmc0_clk>; 393 clocks = <&ahb1_gates 8>,
394 clock-names = "ahb", "mmc"; 394 <&mmc0_clk 0>,
395 <&mmc0_clk 1>,
396 <&mmc0_clk 2>;
397 clock-names = "ahb",
398 "mmc",
399 "output",
400 "sample";
395 resets = <&ahb1_rst 8>; 401 resets = <&ahb1_rst 8>;
396 reset-names = "ahb"; 402 reset-names = "ahb";
397 interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>; 403 interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
@@ -401,8 +407,14 @@
401 mmc1: mmc@01c10000 { 407 mmc1: mmc@01c10000 {
402 compatible = "allwinner,sun5i-a13-mmc"; 408 compatible = "allwinner,sun5i-a13-mmc";
403 reg = <0x01c10000 0x1000>; 409 reg = <0x01c10000 0x1000>;
404 clocks = <&ahb1_gates 9>, <&mmc1_clk>; 410 clocks = <&ahb1_gates 9>,
405 clock-names = "ahb", "mmc"; 411 <&mmc1_clk 0>,
412 <&mmc1_clk 1>,
413 <&mmc1_clk 2>;
414 clock-names = "ahb",
415 "mmc",
416 "output",
417 "sample";
406 resets = <&ahb1_rst 9>; 418 resets = <&ahb1_rst 9>;
407 reset-names = "ahb"; 419 reset-names = "ahb";
408 interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>; 420 interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
@@ -412,8 +424,14 @@
412 mmc2: mmc@01c11000 { 424 mmc2: mmc@01c11000 {
413 compatible = "allwinner,sun5i-a13-mmc"; 425 compatible = "allwinner,sun5i-a13-mmc";
414 reg = <0x01c11000 0x1000>; 426 reg = <0x01c11000 0x1000>;
415 clocks = <&ahb1_gates 10>, <&mmc2_clk>; 427 clocks = <&ahb1_gates 10>,
416 clock-names = "ahb", "mmc"; 428 <&mmc2_clk 0>,
429 <&mmc2_clk 1>,
430 <&mmc2_clk 2>;
431 clock-names = "ahb",
432 "mmc",
433 "output",
434 "sample";
417 resets = <&ahb1_rst 10>; 435 resets = <&ahb1_rst 10>;
418 reset-names = "ahb"; 436 reset-names = "ahb";
419 interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>; 437 interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
@@ -423,8 +441,14 @@
423 mmc3: mmc@01c12000 { 441 mmc3: mmc@01c12000 {
424 compatible = "allwinner,sun5i-a13-mmc"; 442 compatible = "allwinner,sun5i-a13-mmc";
425 reg = <0x01c12000 0x1000>; 443 reg = <0x01c12000 0x1000>;
426 clocks = <&ahb1_gates 11>, <&mmc3_clk>; 444 clocks = <&ahb1_gates 11>,
427 clock-names = "ahb", "mmc"; 445 <&mmc3_clk 0>,
446 <&mmc3_clk 1>,
447 <&mmc3_clk 2>;
448 clock-names = "ahb",
449 "mmc",
450 "output",
451 "sample";
428 resets = <&ahb1_rst 11>; 452 resets = <&ahb1_rst 11>;
429 reset-names = "ahb"; 453 reset-names = "ahb";
430 interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>; 454 interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
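
The sun6i hunks above also fold the old ahb1_mux + divider pair into a single composite ahb1 clock, so the DMA controller's assigned-clocks hook now points at &ahb1 directly; the clock core still reparents it to PLL6 before the device probes. The imperative equivalent of that declarative assignment (sketch, hypothetical helper name):

	#include <linux/clk.h>

	/* What assigned-clocks/assigned-clock-parents expresses in DT:
	 * make PLL6 the parent of ahb1; returns 0 or a negative errno. */
	static int example_reparent_ahb1(struct clk *ahb1, struct clk *pll6)
	{
		return clk_set_parent(ahb1, pll6);
	}
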
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 786d491542ac..3a8530b79f1c 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -337,35 +337,43 @@
337 }; 337 };
338 338
339 mmc0_clk: clk@01c20088 { 339 mmc0_clk: clk@01c20088 {
340 #clock-cells = <0>; 340 #clock-cells = <1>;
341 compatible = "allwinner,sun4i-a10-mod0-clk"; 341 compatible = "allwinner,sun4i-a10-mmc-clk";
342 reg = <0x01c20088 0x4>; 342 reg = <0x01c20088 0x4>;
343 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; 343 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
344 clock-output-names = "mmc0"; 344 clock-output-names = "mmc0",
345 "mmc0_output",
346 "mmc0_sample";
345 }; 347 };
346 348
347 mmc1_clk: clk@01c2008c { 349 mmc1_clk: clk@01c2008c {
348 #clock-cells = <0>; 350 #clock-cells = <1>;
349 compatible = "allwinner,sun4i-a10-mod0-clk"; 351 compatible = "allwinner,sun4i-a10-mmc-clk";
350 reg = <0x01c2008c 0x4>; 352 reg = <0x01c2008c 0x4>;
351 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; 353 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
352 clock-output-names = "mmc1"; 354 clock-output-names = "mmc1",
355 "mmc1_output",
356 "mmc1_sample";
353 }; 357 };
354 358
355 mmc2_clk: clk@01c20090 { 359 mmc2_clk: clk@01c20090 {
356 #clock-cells = <0>; 360 #clock-cells = <1>;
357 compatible = "allwinner,sun4i-a10-mod0-clk"; 361 compatible = "allwinner,sun4i-a10-mmc-clk";
358 reg = <0x01c20090 0x4>; 362 reg = <0x01c20090 0x4>;
359 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; 363 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
360 clock-output-names = "mmc2"; 364 clock-output-names = "mmc2",
365 "mmc2_output",
366 "mmc2_sample";
361 }; 367 };
362 368
363 mmc3_clk: clk@01c20094 { 369 mmc3_clk: clk@01c20094 {
364 #clock-cells = <0>; 370 #clock-cells = <1>;
365 compatible = "allwinner,sun4i-a10-mod0-clk"; 371 compatible = "allwinner,sun4i-a10-mmc-clk";
366 reg = <0x01c20094 0x4>; 372 reg = <0x01c20094 0x4>;
367 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; 373 clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
368 clock-output-names = "mmc3"; 374 clock-output-names = "mmc3",
375 "mmc3_output",
376 "mmc3_sample";
369 }; 377 };
370 378
371 ts_clk: clk@01c20098 { 379 ts_clk: clk@01c20098 {
@@ -583,8 +591,14 @@
583 mmc0: mmc@01c0f000 { 591 mmc0: mmc@01c0f000 {
584 compatible = "allwinner,sun5i-a13-mmc"; 592 compatible = "allwinner,sun5i-a13-mmc";
585 reg = <0x01c0f000 0x1000>; 593 reg = <0x01c0f000 0x1000>;
586 clocks = <&ahb_gates 8>, <&mmc0_clk>; 594 clocks = <&ahb_gates 8>,
587 clock-names = "ahb", "mmc"; 595 <&mmc0_clk 0>,
596 <&mmc0_clk 1>,
597 <&mmc0_clk 2>;
598 clock-names = "ahb",
599 "mmc",
600 "output",
601 "sample";
588 interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>; 602 interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
589 status = "disabled"; 603 status = "disabled";
590 }; 604 };
@@ -592,8 +606,14 @@
592 mmc1: mmc@01c10000 { 606 mmc1: mmc@01c10000 {
593 compatible = "allwinner,sun5i-a13-mmc"; 607 compatible = "allwinner,sun5i-a13-mmc";
594 reg = <0x01c10000 0x1000>; 608 reg = <0x01c10000 0x1000>;
595 clocks = <&ahb_gates 9>, <&mmc1_clk>; 609 clocks = <&ahb_gates 9>,
596 clock-names = "ahb", "mmc"; 610 <&mmc1_clk 0>,
611 <&mmc1_clk 1>,
612 <&mmc1_clk 2>;
613 clock-names = "ahb",
614 "mmc",
615 "output",
616 "sample";
597 interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>; 617 interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
598 status = "disabled"; 618 status = "disabled";
599 }; 619 };
@@ -601,8 +621,14 @@
601 mmc2: mmc@01c11000 { 621 mmc2: mmc@01c11000 {
602 compatible = "allwinner,sun5i-a13-mmc"; 622 compatible = "allwinner,sun5i-a13-mmc";
603 reg = <0x01c11000 0x1000>; 623 reg = <0x01c11000 0x1000>;
604 clocks = <&ahb_gates 10>, <&mmc2_clk>; 624 clocks = <&ahb_gates 10>,
605 clock-names = "ahb", "mmc"; 625 <&mmc2_clk 0>,
626 <&mmc2_clk 1>,
627 <&mmc2_clk 2>;
628 clock-names = "ahb",
629 "mmc",
630 "output",
631 "sample";
606 interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>; 632 interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
607 status = "disabled"; 633 status = "disabled";
608 }; 634 };
@@ -610,8 +636,14 @@
610 mmc3: mmc@01c12000 { 636 mmc3: mmc@01c12000 {
611 compatible = "allwinner,sun5i-a13-mmc"; 637 compatible = "allwinner,sun5i-a13-mmc";
612 reg = <0x01c12000 0x1000>; 638 reg = <0x01c12000 0x1000>;
613 clocks = <&ahb_gates 11>, <&mmc3_clk>; 639 clocks = <&ahb_gates 11>,
614 clock-names = "ahb", "mmc"; 640 <&mmc3_clk 0>,
641 <&mmc3_clk 1>,
642 <&mmc3_clk 2>;
643 clock-names = "ahb",
644 "mmc",
645 "output",
646 "sample";
615 interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>; 647 interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
616 status = "disabled"; 648 status = "disabled";
617 }; 649 };
diff --git a/arch/arm/boot/dts/sun8i-a23.dtsi b/arch/arm/boot/dts/sun8i-a23.dtsi
index dd34527293e4..382ebd137ee4 100644
--- a/arch/arm/boot/dts/sun8i-a23.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23.dtsi
@@ -119,11 +119,19 @@
119 }; 119 };
120 120
121 /* dummy clock until actually implemented */ 121 /* dummy clock until actually implemented */
122 pll6: pll6_clk { 122 pll5: pll5_clk {
123 #clock-cells = <0>; 123 #clock-cells = <0>;
124 compatible = "fixed-clock"; 124 compatible = "fixed-clock";
125 clock-frequency = <600000000>; 125 clock-frequency = <0>;
126 clock-output-names = "pll6"; 126 clock-output-names = "pll5";
127 };
128
129 pll6: clk@01c20028 {
130 #clock-cells = <1>;
131 compatible = "allwinner,sun6i-a31-pll6-clk";
132 reg = <0x01c20028 0x4>;
133 clocks = <&osc24M>;
134 clock-output-names = "pll6", "pll6x2";
127 }; 135 };
128 136
129 cpu: cpu_clk@01c20050 { 137 cpu: cpu_clk@01c20050 {
@@ -149,19 +157,11 @@
149 clock-output-names = "axi"; 157 clock-output-names = "axi";
150 }; 158 };
151 159
152 ahb1_mux: ahb1_mux_clk@01c20054 {
153 #clock-cells = <0>;
154 compatible = "allwinner,sun6i-a31-ahb1-mux-clk";
155 reg = <0x01c20054 0x4>;
156 clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6>;
157 clock-output-names = "ahb1_mux";
158 };
159
160 ahb1: ahb1_clk@01c20054 { 160 ahb1: ahb1_clk@01c20054 {
161 #clock-cells = <0>; 161 #clock-cells = <0>;
162 compatible = "allwinner,sun4i-a10-ahb-clk"; 162 compatible = "allwinner,sun6i-a31-ahb1-clk";
163 reg = <0x01c20054 0x4>; 163 reg = <0x01c20054 0x4>;
164 clocks = <&ahb1_mux>; 164 clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
165 clock-output-names = "ahb1"; 165 clock-output-names = "ahb1";
166 }; 166 };
167 167
@@ -202,7 +202,7 @@
202 #clock-cells = <0>; 202 #clock-cells = <0>;
203 compatible = "allwinner,sun4i-a10-apb1-clk"; 203 compatible = "allwinner,sun4i-a10-apb1-clk";
204 reg = <0x01c20058 0x4>; 204 reg = <0x01c20058 0x4>;
205 clocks = <&osc32k>, <&osc24M>, <&pll6>, <&pll6>; 205 clocks = <&osc32k>, <&osc24M>, <&pll6 0>, <&pll6 0>;
206 clock-output-names = "apb2"; 206 clock-output-names = "apb2";
207 }; 207 };
208 208
@@ -218,27 +218,41 @@
218 }; 218 };
219 219
220 mmc0_clk: clk@01c20088 { 220 mmc0_clk: clk@01c20088 {
221 #clock-cells = <0>; 221 #clock-cells = <1>;
222 compatible = "allwinner,sun4i-a10-mod0-clk"; 222 compatible = "allwinner,sun4i-a10-mmc-clk";
223 reg = <0x01c20088 0x4>; 223 reg = <0x01c20088 0x4>;
224 clocks = <&osc24M>, <&pll6>; 224 clocks = <&osc24M>, <&pll6 0>;
225 clock-output-names = "mmc0"; 225 clock-output-names = "mmc0",
226 "mmc0_output",
227 "mmc0_sample";
226 }; 228 };
227 229
228 mmc1_clk: clk@01c2008c { 230 mmc1_clk: clk@01c2008c {
229 #clock-cells = <0>; 231 #clock-cells = <1>;
230 compatible = "allwinner,sun4i-a10-mod0-clk"; 232 compatible = "allwinner,sun4i-a10-mmc-clk";
231 reg = <0x01c2008c 0x4>; 233 reg = <0x01c2008c 0x4>;
232 clocks = <&osc24M>, <&pll6>; 234 clocks = <&osc24M>, <&pll6 0>;
233 clock-output-names = "mmc1"; 235 clock-output-names = "mmc1",
236 "mmc1_output",
237 "mmc1_sample";
234 }; 238 };
235 239
236 mmc2_clk: clk@01c20090 { 240 mmc2_clk: clk@01c20090 {
237 #clock-cells = <0>; 241 #clock-cells = <1>;
238 compatible = "allwinner,sun4i-a10-mod0-clk"; 242 compatible = "allwinner,sun4i-a10-mmc-clk";
239 reg = <0x01c20090 0x4>; 243 reg = <0x01c20090 0x4>;
240 clocks = <&osc24M>, <&pll6>; 244 clocks = <&osc24M>, <&pll6 0>;
241 clock-output-names = "mmc2"; 245 clock-output-names = "mmc2",
246 "mmc2_output",
247 "mmc2_sample";
248 };
249
250 mbus_clk: clk@01c2015c {
251 #clock-cells = <0>;
252 compatible = "allwinner,sun8i-a23-mbus-clk";
253 reg = <0x01c2015c 0x4>;
254 clocks = <&osc24M>, <&pll6 1>, <&pll5>;
255 clock-output-names = "mbus";
242 }; 256 };
243 }; 257 };
244 258
@@ -260,8 +274,14 @@
260 mmc0: mmc@01c0f000 { 274 mmc0: mmc@01c0f000 {
261 compatible = "allwinner,sun5i-a13-mmc"; 275 compatible = "allwinner,sun5i-a13-mmc";
262 reg = <0x01c0f000 0x1000>; 276 reg = <0x01c0f000 0x1000>;
263 clocks = <&ahb1_gates 8>, <&mmc0_clk>; 277 clocks = <&ahb1_gates 8>,
264 clock-names = "ahb", "mmc"; 278 <&mmc0_clk 0>,
279 <&mmc0_clk 1>,
280 <&mmc0_clk 2>;
281 clock-names = "ahb",
282 "mmc",
283 "output",
284 "sample";
265 resets = <&ahb1_rst 8>; 285 resets = <&ahb1_rst 8>;
266 reset-names = "ahb"; 286 reset-names = "ahb";
267 interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>; 287 interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
@@ -271,8 +291,14 @@
271 mmc1: mmc@01c10000 { 291 mmc1: mmc@01c10000 {
272 compatible = "allwinner,sun5i-a13-mmc"; 292 compatible = "allwinner,sun5i-a13-mmc";
273 reg = <0x01c10000 0x1000>; 293 reg = <0x01c10000 0x1000>;
274 clocks = <&ahb1_gates 9>, <&mmc1_clk>; 294 clocks = <&ahb1_gates 9>,
275 clock-names = "ahb", "mmc"; 295 <&mmc1_clk 0>,
296 <&mmc1_clk 1>,
297 <&mmc1_clk 2>;
298 clock-names = "ahb",
299 "mmc",
300 "output",
301 "sample";
276 resets = <&ahb1_rst 9>; 302 resets = <&ahb1_rst 9>;
277 reset-names = "ahb"; 303 reset-names = "ahb";
278 interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>; 304 interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
@@ -282,8 +308,14 @@
282 mmc2: mmc@01c11000 { 308 mmc2: mmc@01c11000 {
283 compatible = "allwinner,sun5i-a13-mmc"; 309 compatible = "allwinner,sun5i-a13-mmc";
284 reg = <0x01c11000 0x1000>; 310 reg = <0x01c11000 0x1000>;
285 clocks = <&ahb1_gates 10>, <&mmc2_clk>; 311 clocks = <&ahb1_gates 10>,
286 clock-names = "ahb", "mmc"; 312 <&mmc2_clk 0>,
313 <&mmc2_clk 1>,
314 <&mmc2_clk 2>;
315 clock-names = "ahb",
316 "mmc",
317 "output",
318 "sample";
287 resets = <&ahb1_rst 10>; 319 resets = <&ahb1_rst 10>;
288 reset-names = "ahb"; 320 reset-names = "ahb";
289 interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>; 321 interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index e8a4c955241b..b7e6b6fba5e0 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -62,6 +62,17 @@ CONFIG_MACH_SPEAR1340=y
62CONFIG_ARCH_STI=y 62CONFIG_ARCH_STI=y
63CONFIG_ARCH_EXYNOS=y 63CONFIG_ARCH_EXYNOS=y
64CONFIG_EXYNOS5420_MCPM=y 64CONFIG_EXYNOS5420_MCPM=y
65CONFIG_ARCH_SHMOBILE_MULTI=y
66CONFIG_ARCH_EMEV2=y
67CONFIG_ARCH_R7S72100=y
68CONFIG_ARCH_R8A73A4=y
69CONFIG_ARCH_R8A7740=y
70CONFIG_ARCH_R8A7779=y
71CONFIG_ARCH_R8A7790=y
72CONFIG_ARCH_R8A7791=y
73CONFIG_ARCH_R8A7794=y
74CONFIG_ARCH_SH73A0=y
75CONFIG_MACH_MARZEN=y
65CONFIG_ARCH_SUNXI=y 76CONFIG_ARCH_SUNXI=y
66CONFIG_ARCH_SIRF=y 77CONFIG_ARCH_SIRF=y
67CONFIG_ARCH_TEGRA=y 78CONFIG_ARCH_TEGRA=y
@@ -84,6 +95,8 @@ CONFIG_PCI_KEYSTONE=y
84CONFIG_PCI_MSI=y 95CONFIG_PCI_MSI=y
85CONFIG_PCI_MVEBU=y 96CONFIG_PCI_MVEBU=y
86CONFIG_PCI_TEGRA=y 97CONFIG_PCI_TEGRA=y
98CONFIG_PCI_RCAR_GEN2=y
99CONFIG_PCI_RCAR_GEN2_PCIE=y
87CONFIG_PCIEPORTBUS=y 100CONFIG_PCIEPORTBUS=y
88CONFIG_SMP=y 101CONFIG_SMP=y
89CONFIG_NR_CPUS=8 102CONFIG_NR_CPUS=8
@@ -130,6 +143,7 @@ CONFIG_DEVTMPFS_MOUNT=y
130CONFIG_DMA_CMA=y 143CONFIG_DMA_CMA=y
131CONFIG_CMA_SIZE_MBYTES=64 144CONFIG_CMA_SIZE_MBYTES=64
132CONFIG_OMAP_OCP2SCP=y 145CONFIG_OMAP_OCP2SCP=y
146CONFIG_SIMPLE_PM_BUS=y
133CONFIG_MTD=y 147CONFIG_MTD=y
134CONFIG_MTD_CMDLINE_PARTS=y 148CONFIG_MTD_CMDLINE_PARTS=y
135CONFIG_MTD_BLOCK=y 149CONFIG_MTD_BLOCK=y
@@ -157,6 +171,7 @@ CONFIG_AHCI_SUNXI=y
157CONFIG_AHCI_TEGRA=y 171CONFIG_AHCI_TEGRA=y
158CONFIG_SATA_HIGHBANK=y 172CONFIG_SATA_HIGHBANK=y
159CONFIG_SATA_MV=y 173CONFIG_SATA_MV=y
174CONFIG_SATA_RCAR=y
160CONFIG_NETDEVICES=y 175CONFIG_NETDEVICES=y
161CONFIG_HIX5HD2_GMAC=y 176CONFIG_HIX5HD2_GMAC=y
162CONFIG_SUN4I_EMAC=y 177CONFIG_SUN4I_EMAC=y
@@ -167,14 +182,17 @@ CONFIG_MV643XX_ETH=y
167CONFIG_MVNETA=y 182CONFIG_MVNETA=y
168CONFIG_KS8851=y 183CONFIG_KS8851=y
169CONFIG_R8169=y 184CONFIG_R8169=y
185CONFIG_SH_ETH=y
170CONFIG_SMSC911X=y 186CONFIG_SMSC911X=y
171CONFIG_STMMAC_ETH=y 187CONFIG_STMMAC_ETH=y
172CONFIG_TI_CPSW=y 188CONFIG_TI_CPSW=y
173CONFIG_XILINX_EMACLITE=y 189CONFIG_XILINX_EMACLITE=y
174CONFIG_AT803X_PHY=y 190CONFIG_AT803X_PHY=y
175CONFIG_MARVELL_PHY=y 191CONFIG_MARVELL_PHY=y
192CONFIG_SMSC_PHY=y
176CONFIG_BROADCOM_PHY=y 193CONFIG_BROADCOM_PHY=y
177CONFIG_ICPLUS_PHY=y 194CONFIG_ICPLUS_PHY=y
195CONFIG_MICREL_PHY=y
178CONFIG_USB_PEGASUS=y 196CONFIG_USB_PEGASUS=y
179CONFIG_USB_USBNET=y 197CONFIG_USB_USBNET=y
180CONFIG_USB_NET_SMSC75XX=y 198CONFIG_USB_NET_SMSC75XX=y
@@ -192,15 +210,18 @@ CONFIG_KEYBOARD_CROS_EC=y
192CONFIG_MOUSE_PS2_ELANTECH=y 210CONFIG_MOUSE_PS2_ELANTECH=y
193CONFIG_INPUT_TOUCHSCREEN=y 211CONFIG_INPUT_TOUCHSCREEN=y
194CONFIG_TOUCHSCREEN_ATMEL_MXT=y 212CONFIG_TOUCHSCREEN_ATMEL_MXT=y
213CONFIG_TOUCHSCREEN_ST1232=m
195CONFIG_TOUCHSCREEN_STMPE=y 214CONFIG_TOUCHSCREEN_STMPE=y
196CONFIG_TOUCHSCREEN_SUN4I=y 215CONFIG_TOUCHSCREEN_SUN4I=y
197CONFIG_INPUT_MISC=y 216CONFIG_INPUT_MISC=y
198CONFIG_INPUT_MPU3050=y 217CONFIG_INPUT_MPU3050=y
199CONFIG_INPUT_AXP20X_PEK=y 218CONFIG_INPUT_AXP20X_PEK=y
219CONFIG_INPUT_ADXL34X=m
200CONFIG_SERIO_AMBAKMI=y 220CONFIG_SERIO_AMBAKMI=y
201CONFIG_SERIAL_8250=y 221CONFIG_SERIAL_8250=y
202CONFIG_SERIAL_8250_CONSOLE=y 222CONFIG_SERIAL_8250_CONSOLE=y
203CONFIG_SERIAL_8250_DW=y 223CONFIG_SERIAL_8250_DW=y
224CONFIG_SERIAL_8250_EM=y
204CONFIG_SERIAL_8250_MT6577=y 225CONFIG_SERIAL_8250_MT6577=y
205CONFIG_SERIAL_AMBA_PL011=y 226CONFIG_SERIAL_AMBA_PL011=y
206CONFIG_SERIAL_AMBA_PL011_CONSOLE=y 227CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
@@ -213,6 +234,9 @@ CONFIG_SERIAL_SIRFSOC_CONSOLE=y
213CONFIG_SERIAL_TEGRA=y 234CONFIG_SERIAL_TEGRA=y
214CONFIG_SERIAL_IMX=y 235CONFIG_SERIAL_IMX=y
215CONFIG_SERIAL_IMX_CONSOLE=y 236CONFIG_SERIAL_IMX_CONSOLE=y
237CONFIG_SERIAL_SH_SCI=y
238CONFIG_SERIAL_SH_SCI_NR_UARTS=20
239CONFIG_SERIAL_SH_SCI_CONSOLE=y
216CONFIG_SERIAL_MSM=y 240CONFIG_SERIAL_MSM=y
217CONFIG_SERIAL_MSM_CONSOLE=y 241CONFIG_SERIAL_MSM_CONSOLE=y
218CONFIG_SERIAL_VT8500=y 242CONFIG_SERIAL_VT8500=y
@@ -233,19 +257,26 @@ CONFIG_I2C_MUX_PCA954x=y
233CONFIG_I2C_MUX_PINCTRL=y 257CONFIG_I2C_MUX_PINCTRL=y
234CONFIG_I2C_CADENCE=y 258CONFIG_I2C_CADENCE=y
235CONFIG_I2C_DESIGNWARE_PLATFORM=y 259CONFIG_I2C_DESIGNWARE_PLATFORM=y
260CONFIG_I2C_GPIO=m
236CONFIG_I2C_EXYNOS5=y 261CONFIG_I2C_EXYNOS5=y
237CONFIG_I2C_MV64XXX=y 262CONFIG_I2C_MV64XXX=y
263CONFIG_I2C_RIIC=y
238CONFIG_I2C_S3C2410=y 264CONFIG_I2C_S3C2410=y
265CONFIG_I2C_SH_MOBILE=y
239CONFIG_I2C_SIRF=y 266CONFIG_I2C_SIRF=y
240CONFIG_I2C_TEGRA=y
241CONFIG_I2C_ST=y 267CONFIG_I2C_ST=y
242CONFIG_SPI=y 268CONFIG_I2C_TEGRA=y
243CONFIG_I2C_XILINX=y 269CONFIG_I2C_XILINX=y
244CONFIG_SPI_DAVINCI=y 270CONFIG_I2C_RCAR=y
271CONFIG_SPI=y
245CONFIG_SPI_CADENCE=y 272CONFIG_SPI_CADENCE=y
273CONFIG_SPI_DAVINCI=y
246CONFIG_SPI_OMAP24XX=y 274CONFIG_SPI_OMAP24XX=y
247CONFIG_SPI_ORION=y 275CONFIG_SPI_ORION=y
248CONFIG_SPI_PL022=y 276CONFIG_SPI_PL022=y
277CONFIG_SPI_RSPI=y
278CONFIG_SPI_SH_MSIOF=m
279CONFIG_SPI_SH_HSPI=y
249CONFIG_SPI_SIRF=y 280CONFIG_SPI_SIRF=y
250CONFIG_SPI_SUN4I=y 281CONFIG_SPI_SUN4I=y
251CONFIG_SPI_SUN6I=y 282CONFIG_SPI_SUN6I=y
@@ -259,12 +290,15 @@ CONFIG_PINCTRL_PALMAS=y
259CONFIG_PINCTRL_APQ8084=y 290CONFIG_PINCTRL_APQ8084=y
260CONFIG_GPIO_SYSFS=y 291CONFIG_GPIO_SYSFS=y
261CONFIG_GPIO_GENERIC_PLATFORM=y 292CONFIG_GPIO_GENERIC_PLATFORM=y
262CONFIG_GPIO_DWAPB=y
263CONFIG_GPIO_DAVINCI=y 293CONFIG_GPIO_DAVINCI=y
294CONFIG_GPIO_DWAPB=y
295CONFIG_GPIO_EM=y
296CONFIG_GPIO_RCAR=y
264CONFIG_GPIO_XILINX=y 297CONFIG_GPIO_XILINX=y
265CONFIG_GPIO_ZYNQ=y 298CONFIG_GPIO_ZYNQ=y
266CONFIG_GPIO_PCA953X=y 299CONFIG_GPIO_PCA953X=y
267CONFIG_GPIO_PCA953X_IRQ=y 300CONFIG_GPIO_PCA953X_IRQ=y
301CONFIG_GPIO_PCF857X=y
268CONFIG_GPIO_TWL4030=y 302CONFIG_GPIO_TWL4030=y
269CONFIG_GPIO_PALMAS=y 303CONFIG_GPIO_PALMAS=y
270CONFIG_GPIO_SYSCON=y 304CONFIG_GPIO_SYSCON=y
@@ -276,10 +310,12 @@ CONFIG_POWER_RESET_AS3722=y
276CONFIG_POWER_RESET_GPIO=y 310CONFIG_POWER_RESET_GPIO=y
277CONFIG_POWER_RESET_KEYSTONE=y 311CONFIG_POWER_RESET_KEYSTONE=y
278CONFIG_POWER_RESET_SUN6I=y 312CONFIG_POWER_RESET_SUN6I=y
313CONFIG_POWER_RESET_RMOBILE=y
279CONFIG_SENSORS_LM90=y 314CONFIG_SENSORS_LM90=y
280CONFIG_SENSORS_LM95245=y 315CONFIG_SENSORS_LM95245=y
281CONFIG_THERMAL=y 316CONFIG_THERMAL=y
282CONFIG_CPU_THERMAL=y 317CONFIG_CPU_THERMAL=y
318CONFIG_RCAR_THERMAL=y
283CONFIG_ARMADA_THERMAL=y 319CONFIG_ARMADA_THERMAL=y
284CONFIG_DAVINCI_WATCHDOG=y 320CONFIG_DAVINCI_WATCHDOG=y
285CONFIG_ST_THERMAL_SYSCFG=y 321CONFIG_ST_THERMAL_SYSCFG=y
@@ -290,6 +326,7 @@ CONFIG_ARM_SP805_WATCHDOG=y
290CONFIG_ORION_WATCHDOG=y 326CONFIG_ORION_WATCHDOG=y
291CONFIG_SUNXI_WATCHDOG=y 327CONFIG_SUNXI_WATCHDOG=y
292CONFIG_MESON_WATCHDOG=y 328CONFIG_MESON_WATCHDOG=y
329CONFIG_MFD_AS3711=y
293CONFIG_MFD_AS3722=y 330CONFIG_MFD_AS3722=y
294CONFIG_MFD_BCM590XX=y 331CONFIG_MFD_BCM590XX=y
295CONFIG_MFD_AXP20X=y 332CONFIG_MFD_AXP20X=y
@@ -304,13 +341,16 @@ CONFIG_MFD_TPS65090=y
304CONFIG_MFD_TPS6586X=y 341CONFIG_MFD_TPS6586X=y
305CONFIG_MFD_TPS65910=y 342CONFIG_MFD_TPS65910=y
306CONFIG_REGULATOR_AB8500=y 343CONFIG_REGULATOR_AB8500=y
344CONFIG_REGULATOR_AS3711=y
307CONFIG_REGULATOR_AS3722=y 345CONFIG_REGULATOR_AS3722=y
308CONFIG_REGULATOR_AXP20X=y 346CONFIG_REGULATOR_AXP20X=y
309CONFIG_REGULATOR_BCM590XX=y 347CONFIG_REGULATOR_BCM590XX=y
348CONFIG_REGULATOR_DA9210=y
310CONFIG_REGULATOR_GPIO=y 349CONFIG_REGULATOR_GPIO=y
311CONFIG_MFD_SYSCON=y 350CONFIG_MFD_SYSCON=y
312CONFIG_POWER_RESET_SYSCON=y 351CONFIG_POWER_RESET_SYSCON=y
313CONFIG_REGULATOR_MAX8907=y 352CONFIG_REGULATOR_MAX8907=y
353CONFIG_REGULATOR_MAX8973=y
314CONFIG_REGULATOR_MAX77686=y 354CONFIG_REGULATOR_MAX77686=y
315CONFIG_REGULATOR_PALMAS=y 355CONFIG_REGULATOR_PALMAS=y
316CONFIG_REGULATOR_S2MPS11=y 356CONFIG_REGULATOR_S2MPS11=y
@@ -324,18 +364,32 @@ CONFIG_REGULATOR_TWL4030=y
324CONFIG_REGULATOR_VEXPRESS=y 364CONFIG_REGULATOR_VEXPRESS=y
325CONFIG_MEDIA_SUPPORT=y 365CONFIG_MEDIA_SUPPORT=y
326CONFIG_MEDIA_CAMERA_SUPPORT=y 366CONFIG_MEDIA_CAMERA_SUPPORT=y
367CONFIG_MEDIA_CONTROLLER=y
368CONFIG_VIDEO_V4L2_SUBDEV_API=y
327CONFIG_MEDIA_USB_SUPPORT=y 369CONFIG_MEDIA_USB_SUPPORT=y
328CONFIG_USB_VIDEO_CLASS=y 370CONFIG_USB_VIDEO_CLASS=y
329CONFIG_USB_GSPCA=y 371CONFIG_USB_GSPCA=y
372CONFIG_V4L_PLATFORM_DRIVERS=y
373CONFIG_SOC_CAMERA=m
374CONFIG_SOC_CAMERA_PLATFORM=m
375CONFIG_VIDEO_RCAR_VIN=m
376CONFIG_V4L_MEM2MEM_DRIVERS=y
377CONFIG_VIDEO_RENESAS_VSP1=m
378# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
379CONFIG_VIDEO_ADV7180=m
330CONFIG_DRM=y 380CONFIG_DRM=y
381CONFIG_DRM_RCAR_DU=m
331CONFIG_DRM_TEGRA=y 382CONFIG_DRM_TEGRA=y
332CONFIG_DRM_PANEL_SIMPLE=y 383CONFIG_DRM_PANEL_SIMPLE=y
333CONFIG_FB_ARMCLCD=y 384CONFIG_FB_ARMCLCD=y
334CONFIG_FB_WM8505=y 385CONFIG_FB_WM8505=y
386CONFIG_FB_SH_MOBILE_LCDC=y
335CONFIG_FB_SIMPLE=y 387CONFIG_FB_SIMPLE=y
388CONFIG_FB_SH_MOBILE_MERAM=y
336CONFIG_BACKLIGHT_LCD_SUPPORT=y 389CONFIG_BACKLIGHT_LCD_SUPPORT=y
337CONFIG_BACKLIGHT_CLASS_DEVICE=y 390CONFIG_BACKLIGHT_CLASS_DEVICE=y
338CONFIG_BACKLIGHT_PWM=y 391CONFIG_BACKLIGHT_PWM=y
392CONFIG_BACKLIGHT_AS3711=y
339CONFIG_FRAMEBUFFER_CONSOLE=y 393CONFIG_FRAMEBUFFER_CONSOLE=y
340CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y 394CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
341CONFIG_SOUND=y 395CONFIG_SOUND=y
@@ -343,6 +397,8 @@ CONFIG_SND=y
343CONFIG_SND_DYNAMIC_MINORS=y 397CONFIG_SND_DYNAMIC_MINORS=y
344CONFIG_SND_USB_AUDIO=y 398CONFIG_SND_USB_AUDIO=y
345CONFIG_SND_SOC=y 399CONFIG_SND_SOC=y
400CONFIG_SND_SOC_SH4_FSI=m
401CONFIG_SND_SOC_RCAR=m
346CONFIG_SND_SOC_TEGRA=y 402CONFIG_SND_SOC_TEGRA=y
347CONFIG_SND_SOC_TEGRA_RT5640=y 403CONFIG_SND_SOC_TEGRA_RT5640=y
348CONFIG_SND_SOC_TEGRA_WM8753=y 404CONFIG_SND_SOC_TEGRA_WM8753=y
@@ -350,6 +406,8 @@ CONFIG_SND_SOC_TEGRA_WM8903=y
350CONFIG_SND_SOC_TEGRA_TRIMSLICE=y 406CONFIG_SND_SOC_TEGRA_TRIMSLICE=y
351CONFIG_SND_SOC_TEGRA_ALC5632=y 407CONFIG_SND_SOC_TEGRA_ALC5632=y
352CONFIG_SND_SOC_TEGRA_MAX98090=y 408CONFIG_SND_SOC_TEGRA_MAX98090=y
409CONFIG_SND_SOC_AK4642=m
410CONFIG_SND_SOC_WM8978=m
353CONFIG_USB=y 411CONFIG_USB=y
354CONFIG_USB_XHCI_HCD=y 412CONFIG_USB_XHCI_HCD=y
355CONFIG_USB_XHCI_MVEBU=y 413CONFIG_USB_XHCI_MVEBU=y
@@ -362,6 +420,8 @@ CONFIG_USB_ISP1760_HCD=y
362CONFIG_USB_OHCI_HCD=y 420CONFIG_USB_OHCI_HCD=y
363CONFIG_USB_OHCI_HCD_STI=y 421CONFIG_USB_OHCI_HCD_STI=y
364CONFIG_USB_OHCI_HCD_PLATFORM=y 422CONFIG_USB_OHCI_HCD_PLATFORM=y
423CONFIG_USB_R8A66597_HCD=m
424CONFIG_USB_RENESAS_USBHS=m
365CONFIG_USB_STORAGE=y 425CONFIG_USB_STORAGE=y
366CONFIG_USB_DWC3=y 426CONFIG_USB_DWC3=y
367CONFIG_USB_CHIPIDEA=y 427CONFIG_USB_CHIPIDEA=y
@@ -374,6 +434,10 @@ CONFIG_SAMSUNG_USB3PHY=y
374CONFIG_USB_GPIO_VBUS=y 434CONFIG_USB_GPIO_VBUS=y
375CONFIG_USB_ISP1301=y 435CONFIG_USB_ISP1301=y
376CONFIG_USB_MXS_PHY=y 436CONFIG_USB_MXS_PHY=y
437CONFIG_USB_RCAR_PHY=m
438CONFIG_USB_RCAR_GEN2_PHY=m
439CONFIG_USB_GADGET=y
440CONFIG_USB_RENESAS_USBHS_UDC=m
377CONFIG_MMC=y 441CONFIG_MMC=y
378CONFIG_MMC_BLOCK_MINORS=16 442CONFIG_MMC_BLOCK_MINORS=16
379CONFIG_MMC_ARMMMCI=y 443CONFIG_MMC_ARMMMCI=y
@@ -392,12 +456,14 @@ CONFIG_MMC_SDHCI_ST=y
392CONFIG_MMC_OMAP=y 456CONFIG_MMC_OMAP=y
393CONFIG_MMC_OMAP_HS=y 457CONFIG_MMC_OMAP_HS=y
394CONFIG_MMC_MVSDIO=y 458CONFIG_MMC_MVSDIO=y
395CONFIG_MMC_SUNXI=y 459CONFIG_MMC_SDHI=y
396CONFIG_MMC_DW=y 460CONFIG_MMC_DW=y
397CONFIG_MMC_DW_IDMAC=y 461CONFIG_MMC_DW_IDMAC=y
398CONFIG_MMC_DW_PLTFM=y 462CONFIG_MMC_DW_PLTFM=y
399CONFIG_MMC_DW_EXYNOS=y 463CONFIG_MMC_DW_EXYNOS=y
400CONFIG_MMC_DW_ROCKCHIP=y 464CONFIG_MMC_DW_ROCKCHIP=y
465CONFIG_MMC_SH_MMCIF=y
466CONFIG_MMC_SUNXI=y
401CONFIG_NEW_LEDS=y 467CONFIG_NEW_LEDS=y
402CONFIG_LEDS_CLASS=y 468CONFIG_LEDS_CLASS=y
403CONFIG_LEDS_GPIO=y 469CONFIG_LEDS_GPIO=y
@@ -421,10 +487,12 @@ CONFIG_RTC_DRV_AS3722=y
421CONFIG_RTC_DRV_DS1307=y 487CONFIG_RTC_DRV_DS1307=y
422CONFIG_RTC_DRV_MAX8907=y 488CONFIG_RTC_DRV_MAX8907=y
423CONFIG_RTC_DRV_MAX77686=y 489CONFIG_RTC_DRV_MAX77686=y
490CONFIG_RTC_DRV_RS5C372=m
424CONFIG_RTC_DRV_PALMAS=y 491CONFIG_RTC_DRV_PALMAS=y
425CONFIG_RTC_DRV_TWL4030=y 492CONFIG_RTC_DRV_TWL4030=y
426CONFIG_RTC_DRV_TPS6586X=y 493CONFIG_RTC_DRV_TPS6586X=y
427CONFIG_RTC_DRV_TPS65910=y 494CONFIG_RTC_DRV_TPS65910=y
495CONFIG_RTC_DRV_S35390A=m
428CONFIG_RTC_DRV_EM3027=y 496CONFIG_RTC_DRV_EM3027=y
429CONFIG_RTC_DRV_PL031=y 497CONFIG_RTC_DRV_PL031=y
430CONFIG_RTC_DRV_VT8500=y 498CONFIG_RTC_DRV_VT8500=y
@@ -436,6 +504,9 @@ CONFIG_DMADEVICES=y
436CONFIG_DW_DMAC=y 504CONFIG_DW_DMAC=y
437CONFIG_MV_XOR=y 505CONFIG_MV_XOR=y
438CONFIG_TEGRA20_APB_DMA=y 506CONFIG_TEGRA20_APB_DMA=y
507CONFIG_SH_DMAE=y
508CONFIG_RCAR_AUDMAC_PP=m
509CONFIG_RCAR_DMAC=y
439CONFIG_STE_DMA40=y 510CONFIG_STE_DMA40=y
440CONFIG_SIRF_DMA=y 511CONFIG_SIRF_DMA=y
441CONFIG_TI_EDMA=y 512CONFIG_TI_EDMA=y
@@ -468,6 +539,7 @@ CONFIG_IIO=y
468CONFIG_XILINX_XADC=y 539CONFIG_XILINX_XADC=y
469CONFIG_AK8975=y 540CONFIG_AK8975=y
470CONFIG_PWM=y 541CONFIG_PWM=y
542CONFIG_PWM_RENESAS_TPU=y
471CONFIG_PWM_TEGRA=y 543CONFIG_PWM_TEGRA=y
472CONFIG_PWM_VT8500=y 544CONFIG_PWM_VT8500=y
473CONFIG_PHY_HIX5HD2_SATA=y 545CONFIG_PHY_HIX5HD2_SATA=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index b7386524c356..a097cffa1231 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -114,6 +114,7 @@ CONFIG_MTD_PHYSMAP_OF=y
114CONFIG_MTD_NAND=y 114CONFIG_MTD_NAND=y
115CONFIG_MTD_NAND_ECC_BCH=y 115CONFIG_MTD_NAND_ECC_BCH=y
116CONFIG_MTD_NAND_OMAP2=y 116CONFIG_MTD_NAND_OMAP2=y
117CONFIG_MTD_NAND_OMAP_BCH=y
117CONFIG_MTD_ONENAND=y 118CONFIG_MTD_ONENAND=y
118CONFIG_MTD_ONENAND_VERIFY_WRITE=y 119CONFIG_MTD_ONENAND_VERIFY_WRITE=y
119CONFIG_MTD_ONENAND_OMAP2=y 120CONFIG_MTD_ONENAND_OMAP2=y
@@ -248,6 +249,7 @@ CONFIG_TWL6040_CORE=y
248CONFIG_REGULATOR_PALMAS=y 249CONFIG_REGULATOR_PALMAS=y
249CONFIG_REGULATOR_PBIAS=y 250CONFIG_REGULATOR_PBIAS=y
250CONFIG_REGULATOR_TI_ABB=y 251CONFIG_REGULATOR_TI_ABB=y
252CONFIG_REGULATOR_TPS62360=m
251CONFIG_REGULATOR_TPS65023=y 253CONFIG_REGULATOR_TPS65023=y
252CONFIG_REGULATOR_TPS6507X=y 254CONFIG_REGULATOR_TPS6507X=y
253CONFIG_REGULATOR_TPS65217=y 255CONFIG_REGULATOR_TPS65217=y
@@ -374,7 +376,7 @@ CONFIG_PWM_TIEHRPWM=m
374CONFIG_PWM_TWL=m 376CONFIG_PWM_TWL=m
375CONFIG_PWM_TWL_LED=m 377CONFIG_PWM_TWL_LED=m
376CONFIG_OMAP_USB2=m 378CONFIG_OMAP_USB2=m
377CONFIG_TI_PIPE3=m 379CONFIG_TI_PIPE3=y
378CONFIG_EXT2_FS=y 380CONFIG_EXT2_FS=y
379CONFIG_EXT3_FS=y 381CONFIG_EXT3_FS=y
380# CONFIG_EXT3_FS_XATTR is not set 382# CONFIG_EXT3_FS_XATTR is not set
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 4767eb9caa78..ce0786efd26c 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -73,7 +73,7 @@ static inline void set_fs(mm_segment_t fs)
73 modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); 73 modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
74} 74}
75 75
76#define segment_eq(a,b) ((a) == (b)) 76#define segment_eq(a, b) ((a) == (b))
77 77
78#define __addr_ok(addr) ({ \ 78#define __addr_ok(addr) ({ \
79 unsigned long flag; \ 79 unsigned long flag; \
@@ -84,7 +84,7 @@ static inline void set_fs(mm_segment_t fs)
84 (flag == 0); }) 84 (flag == 0); })
85 85
86/* We use 33-bit arithmetic here... */ 86/* We use 33-bit arithmetic here... */
87#define __range_ok(addr,size) ({ \ 87#define __range_ok(addr, size) ({ \
88 unsigned long flag, roksum; \ 88 unsigned long flag, roksum; \
89 __chk_user_ptr(addr); \ 89 __chk_user_ptr(addr); \
90 __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \ 90 __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
@@ -123,7 +123,7 @@ extern int __get_user_64t_4(void *);
123#define __GUP_CLOBBER_32t_8 "lr", "cc" 123#define __GUP_CLOBBER_32t_8 "lr", "cc"
124#define __GUP_CLOBBER_8 "lr", "cc" 124#define __GUP_CLOBBER_8 "lr", "cc"
125 125
126#define __get_user_x(__r2,__p,__e,__l,__s) \ 126#define __get_user_x(__r2, __p, __e, __l, __s) \
127 __asm__ __volatile__ ( \ 127 __asm__ __volatile__ ( \
128 __asmeq("%0", "r0") __asmeq("%1", "r2") \ 128 __asmeq("%0", "r0") __asmeq("%1", "r2") \
129 __asmeq("%3", "r1") \ 129 __asmeq("%3", "r1") \
@@ -134,7 +134,7 @@ extern int __get_user_64t_4(void *);
134 134
135/* narrowing a double-word get into a single 32bit word register: */ 135/* narrowing a double-word get into a single 32bit word register: */
136#ifdef __ARMEB__ 136#ifdef __ARMEB__
137#define __get_user_x_32t(__r2, __p, __e, __l, __s) \ 137#define __get_user_x_32t(__r2, __p, __e, __l, __s) \
138 __get_user_x(__r2, __p, __e, __l, 32t_8) 138 __get_user_x(__r2, __p, __e, __l, 32t_8)
139#else 139#else
140#define __get_user_x_32t __get_user_x 140#define __get_user_x_32t __get_user_x
@@ -158,7 +158,7 @@ extern int __get_user_64t_4(void *);
158#endif 158#endif
159 159
160 160
161#define __get_user_check(x,p) \ 161#define __get_user_check(x, p) \
162 ({ \ 162 ({ \
163 unsigned long __limit = current_thread_info()->addr_limit - 1; \ 163 unsigned long __limit = current_thread_info()->addr_limit - 1; \
164 register const typeof(*(p)) __user *__p asm("r0") = (p);\ 164 register const typeof(*(p)) __user *__p asm("r0") = (p);\
@@ -196,10 +196,10 @@ extern int __get_user_64t_4(void *);
196 __e; \ 196 __e; \
197 }) 197 })
198 198
199#define get_user(x,p) \ 199#define get_user(x, p) \
200 ({ \ 200 ({ \
201 might_fault(); \ 201 might_fault(); \
202 __get_user_check(x,p); \ 202 __get_user_check(x, p); \
203 }) 203 })
204 204
205extern int __put_user_1(void *, unsigned int); 205extern int __put_user_1(void *, unsigned int);
@@ -207,7 +207,7 @@ extern int __put_user_2(void *, unsigned int);
207extern int __put_user_4(void *, unsigned int); 207extern int __put_user_4(void *, unsigned int);
208extern int __put_user_8(void *, unsigned long long); 208extern int __put_user_8(void *, unsigned long long);
209 209
210#define __put_user_x(__r2,__p,__e,__l,__s) \ 210#define __put_user_x(__r2, __p, __e, __l, __s) \
211 __asm__ __volatile__ ( \ 211 __asm__ __volatile__ ( \
212 __asmeq("%0", "r0") __asmeq("%2", "r2") \ 212 __asmeq("%0", "r0") __asmeq("%2", "r2") \
213 __asmeq("%3", "r1") \ 213 __asmeq("%3", "r1") \
@@ -216,7 +216,7 @@ extern int __put_user_8(void *, unsigned long long);
216 : "0" (__p), "r" (__r2), "r" (__l) \ 216 : "0" (__p), "r" (__r2), "r" (__l) \
217 : "ip", "lr", "cc") 217 : "ip", "lr", "cc")
218 218
219#define __put_user_check(x,p) \ 219#define __put_user_check(x, p) \
220 ({ \ 220 ({ \
221 unsigned long __limit = current_thread_info()->addr_limit - 1; \ 221 unsigned long __limit = current_thread_info()->addr_limit - 1; \
222 const typeof(*(p)) __user *__tmp_p = (p); \ 222 const typeof(*(p)) __user *__tmp_p = (p); \
@@ -242,10 +242,10 @@ extern int __put_user_8(void *, unsigned long long);
242 __e; \ 242 __e; \
243 }) 243 })
244 244
245#define put_user(x,p) \ 245#define put_user(x, p) \
246 ({ \ 246 ({ \
247 might_fault(); \ 247 might_fault(); \
248 __put_user_check(x,p); \ 248 __put_user_check(x, p); \
249 }) 249 })
250 250
251#else /* CONFIG_MMU */ 251#else /* CONFIG_MMU */
@@ -255,21 +255,21 @@ extern int __put_user_8(void *, unsigned long long);
255 */ 255 */
256#define USER_DS KERNEL_DS 256#define USER_DS KERNEL_DS
257 257
258#define segment_eq(a,b) (1) 258#define segment_eq(a, b) (1)
259#define __addr_ok(addr) ((void)(addr),1) 259#define __addr_ok(addr) ((void)(addr), 1)
260#define __range_ok(addr,size) ((void)(addr),0) 260#define __range_ok(addr, size) ((void)(addr), 0)
261#define get_fs() (KERNEL_DS) 261#define get_fs() (KERNEL_DS)
262 262
263static inline void set_fs(mm_segment_t fs) 263static inline void set_fs(mm_segment_t fs)
264{ 264{
265} 265}
266 266
267#define get_user(x,p) __get_user(x,p) 267#define get_user(x, p) __get_user(x, p)
268#define put_user(x,p) __put_user(x,p) 268#define put_user(x, p) __put_user(x, p)
269 269
270#endif /* CONFIG_MMU */ 270#endif /* CONFIG_MMU */
271 271
272#define access_ok(type,addr,size) (__range_ok(addr,size) == 0) 272#define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
273 273
274#define user_addr_max() \ 274#define user_addr_max() \
275 (segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs()) 275 (segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())
@@ -283,35 +283,35 @@ static inline void set_fs(mm_segment_t fs)
283 * error occurs, and leave it unchanged on success. Note that these 283 * error occurs, and leave it unchanged on success. Note that these
284 * versions are void (ie, don't return a value as such). 284 * versions are void (ie, don't return a value as such).
285 */ 285 */
286#define __get_user(x,ptr) \ 286#define __get_user(x, ptr) \
287({ \ 287({ \
288 long __gu_err = 0; \ 288 long __gu_err = 0; \
289 __get_user_err((x),(ptr),__gu_err); \ 289 __get_user_err((x), (ptr), __gu_err); \
290 __gu_err; \ 290 __gu_err; \
291}) 291})
292 292
293#define __get_user_error(x,ptr,err) \ 293#define __get_user_error(x, ptr, err) \
294({ \ 294({ \
295 __get_user_err((x),(ptr),err); \ 295 __get_user_err((x), (ptr), err); \
296 (void) 0; \ 296 (void) 0; \
297}) 297})
298 298
299#define __get_user_err(x,ptr,err) \ 299#define __get_user_err(x, ptr, err) \
300do { \ 300do { \
301 unsigned long __gu_addr = (unsigned long)(ptr); \ 301 unsigned long __gu_addr = (unsigned long)(ptr); \
302 unsigned long __gu_val; \ 302 unsigned long __gu_val; \
303 __chk_user_ptr(ptr); \ 303 __chk_user_ptr(ptr); \
304 might_fault(); \ 304 might_fault(); \
305 switch (sizeof(*(ptr))) { \ 305 switch (sizeof(*(ptr))) { \
306 case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \ 306 case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \
307 case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \ 307 case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \
308 case 4: __get_user_asm_word(__gu_val,__gu_addr,err); break; \ 308 case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \
309 default: (__gu_val) = __get_user_bad(); \ 309 default: (__gu_val) = __get_user_bad(); \
310 } \ 310 } \
311 (x) = (__typeof__(*(ptr)))__gu_val; \ 311 (x) = (__typeof__(*(ptr)))__gu_val; \
312} while (0) 312} while (0)
313 313
314#define __get_user_asm_byte(x,addr,err) \ 314#define __get_user_asm_byte(x, addr, err) \
315 __asm__ __volatile__( \ 315 __asm__ __volatile__( \
316 "1: " TUSER(ldrb) " %1,[%2],#0\n" \ 316 "1: " TUSER(ldrb) " %1,[%2],#0\n" \
317 "2:\n" \ 317 "2:\n" \
@@ -330,7 +330,7 @@ do { \
330 : "cc") 330 : "cc")
331 331
332#ifndef __ARMEB__ 332#ifndef __ARMEB__
333#define __get_user_asm_half(x,__gu_addr,err) \ 333#define __get_user_asm_half(x, __gu_addr, err) \
334({ \ 334({ \
335 unsigned long __b1, __b2; \ 335 unsigned long __b1, __b2; \
336 __get_user_asm_byte(__b1, __gu_addr, err); \ 336 __get_user_asm_byte(__b1, __gu_addr, err); \
@@ -338,7 +338,7 @@ do { \
338 (x) = __b1 | (__b2 << 8); \ 338 (x) = __b1 | (__b2 << 8); \
339}) 339})
340#else 340#else
341#define __get_user_asm_half(x,__gu_addr,err) \ 341#define __get_user_asm_half(x, __gu_addr, err) \
342({ \ 342({ \
343 unsigned long __b1, __b2; \ 343 unsigned long __b1, __b2; \
344 __get_user_asm_byte(__b1, __gu_addr, err); \ 344 __get_user_asm_byte(__b1, __gu_addr, err); \
@@ -347,7 +347,7 @@ do { \
347}) 347})
348#endif 348#endif
349 349
350#define __get_user_asm_word(x,addr,err) \ 350#define __get_user_asm_word(x, addr, err) \
351 __asm__ __volatile__( \ 351 __asm__ __volatile__( \
352 "1: " TUSER(ldr) " %1,[%2],#0\n" \ 352 "1: " TUSER(ldr) " %1,[%2],#0\n" \
353 "2:\n" \ 353 "2:\n" \
@@ -365,35 +365,35 @@ do { \
365 : "r" (addr), "i" (-EFAULT) \ 365 : "r" (addr), "i" (-EFAULT) \
366 : "cc") 366 : "cc")
367 367
368#define __put_user(x,ptr) \ 368#define __put_user(x, ptr) \
369({ \ 369({ \
370 long __pu_err = 0; \ 370 long __pu_err = 0; \
371 __put_user_err((x),(ptr),__pu_err); \ 371 __put_user_err((x), (ptr), __pu_err); \
372 __pu_err; \ 372 __pu_err; \
373}) 373})
374 374
375#define __put_user_error(x,ptr,err) \ 375#define __put_user_error(x, ptr, err) \
376({ \ 376({ \
377 __put_user_err((x),(ptr),err); \ 377 __put_user_err((x), (ptr), err); \
378 (void) 0; \ 378 (void) 0; \
379}) 379})
380 380
381#define __put_user_err(x,ptr,err) \ 381#define __put_user_err(x, ptr, err) \
382do { \ 382do { \
383 unsigned long __pu_addr = (unsigned long)(ptr); \ 383 unsigned long __pu_addr = (unsigned long)(ptr); \
384 __typeof__(*(ptr)) __pu_val = (x); \ 384 __typeof__(*(ptr)) __pu_val = (x); \
385 __chk_user_ptr(ptr); \ 385 __chk_user_ptr(ptr); \
386 might_fault(); \ 386 might_fault(); \
387 switch (sizeof(*(ptr))) { \ 387 switch (sizeof(*(ptr))) { \
388 case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \ 388 case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \
389 case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \ 389 case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \
390 case 4: __put_user_asm_word(__pu_val,__pu_addr,err); break; \ 390 case 4: __put_user_asm_word(__pu_val, __pu_addr, err); break; \
391 case 8: __put_user_asm_dword(__pu_val,__pu_addr,err); break; \ 391 case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \
392 default: __put_user_bad(); \ 392 default: __put_user_bad(); \
393 } \ 393 } \
394} while (0) 394} while (0)
395 395
396#define __put_user_asm_byte(x,__pu_addr,err) \ 396#define __put_user_asm_byte(x, __pu_addr, err) \
397 __asm__ __volatile__( \ 397 __asm__ __volatile__( \
398 "1: " TUSER(strb) " %1,[%2],#0\n" \ 398 "1: " TUSER(strb) " %1,[%2],#0\n" \
399 "2:\n" \ 399 "2:\n" \
@@ -411,22 +411,22 @@ do { \
411 : "cc") 411 : "cc")
412 412
413#ifndef __ARMEB__ 413#ifndef __ARMEB__
414#define __put_user_asm_half(x,__pu_addr,err) \ 414#define __put_user_asm_half(x, __pu_addr, err) \
415({ \ 415({ \
416 unsigned long __temp = (unsigned long)(x); \ 416 unsigned long __temp = (__force unsigned long)(x); \
417 __put_user_asm_byte(__temp, __pu_addr, err); \ 417 __put_user_asm_byte(__temp, __pu_addr, err); \
418 __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \ 418 __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
419}) 419})
420#else 420#else
421#define __put_user_asm_half(x,__pu_addr,err) \ 421#define __put_user_asm_half(x, __pu_addr, err) \
422({ \ 422({ \
423 unsigned long __temp = (unsigned long)(x); \ 423 unsigned long __temp = (__force unsigned long)(x); \
424 __put_user_asm_byte(__temp >> 8, __pu_addr, err); \ 424 __put_user_asm_byte(__temp >> 8, __pu_addr, err); \
425 __put_user_asm_byte(__temp, __pu_addr + 1, err); \ 425 __put_user_asm_byte(__temp, __pu_addr + 1, err); \
426}) 426})
427#endif 427#endif
428 428
429#define __put_user_asm_word(x,__pu_addr,err) \ 429#define __put_user_asm_word(x, __pu_addr, err) \
430 __asm__ __volatile__( \ 430 __asm__ __volatile__( \
431 "1: " TUSER(str) " %1,[%2],#0\n" \ 431 "1: " TUSER(str) " %1,[%2],#0\n" \
432 "2:\n" \ 432 "2:\n" \
@@ -451,7 +451,7 @@ do { \
451#define __reg_oper1 "%R2" 451#define __reg_oper1 "%R2"
452#endif 452#endif
453 453
454#define __put_user_asm_dword(x,__pu_addr,err) \ 454#define __put_user_asm_dword(x, __pu_addr, err) \
455 __asm__ __volatile__( \ 455 __asm__ __volatile__( \
456 ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \ 456 ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \
457 ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \ 457 ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \
@@ -480,9 +480,9 @@ extern unsigned long __must_check __copy_to_user_std(void __user *to, const void
480extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); 480extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
481extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n); 481extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
482#else 482#else
483#define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0) 483#define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
484#define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0) 484#define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
485#define __clear_user(addr,n) (memset((void __force *)addr, 0, n), 0) 485#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0)
486#endif 486#endif
487 487
488static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) 488static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
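
The hunks above are mostly whitespace and annotation cleanups, but two details deserve a note. First, the new __force casts in __put_user_asm_half() are for sparse: (x) may carry a __user or __bitwise annotation, and the cast to unsigned long would otherwise warn. Second, the accessors themselves follow a fixed convention worth illustrating; a minimal sketch (the wrapper function is hypothetical, the accessors and their -EFAULT convention are from this header):

    #include <linux/uaccess.h>

    /* Hypothetical helper: bump a counter that lives in userspace.
     * get_user()/put_user() return 0 on success and -EFAULT if the
     * address is bad, which is why the results are checked directly. */
    static int demo_bump_counter(int __user *uptr)
    {
            int val;

            if (get_user(val, uptr))
                    return -EFAULT;
            return put_user(val + 1, uptr);
    }
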
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index dd9acc95ebc0..61b53c46edfa 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -231,7 +231,7 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
231/* 231/*
232 * PMU platform driver and devicetree bindings. 232 * PMU platform driver and devicetree bindings.
233 */ 233 */
234static struct of_device_id cpu_pmu_of_device_ids[] = { 234static const struct of_device_id cpu_pmu_of_device_ids[] = {
235 {.compatible = "arm,cortex-a17-pmu", .data = armv7_a17_pmu_init}, 235 {.compatible = "arm,cortex-a17-pmu", .data = armv7_a17_pmu_init},
236 {.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init}, 236 {.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init},
237 {.compatible = "arm,cortex-a12-pmu", .data = armv7_a12_pmu_init}, 237 {.compatible = "arm,cortex-a12-pmu", .data = armv7_a12_pmu_init},
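
Several hunks in this merge apply the same pattern seen here: OF match tables are never written after initialization, so they can be declared const and placed in read-only memory. A minimal sketch of the resulting idiom, with hypothetical names:

    #include <linux/errno.h>
    #include <linux/mod_devicetable.h>
    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    /* Hypothetical driver: a const match table plus a probe() that
     * looks up the matching entry to retrieve per-compatible data. */
    static const struct of_device_id demo_of_match[] = {
            { .compatible = "vendor,demo-ip", .data = (void *)1 },
            { /* sentinel */ }
    };

    static int demo_probe(struct platform_device *pdev)
    {
            const struct of_device_id *id =
                    of_match_device(demo_of_match, &pdev->dev);

            if (!id)
                    return -ENODEV;
            /* id->data selects the per-SoC behaviour */
            return 0;
    }
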
diff --git a/arch/arm/mach-asm9260/Kconfig b/arch/arm/mach-asm9260/Kconfig
index 8423be76080e..52241207a82a 100644
--- a/arch/arm/mach-asm9260/Kconfig
+++ b/arch/arm/mach-asm9260/Kconfig
@@ -2,5 +2,7 @@ config MACH_ASM9260
2 bool "Alphascale ASM9260" 2 bool "Alphascale ASM9260"
3 depends on ARCH_MULTI_V5 3 depends on ARCH_MULTI_V5
4 select CPU_ARM926T 4 select CPU_ARM926T
5 select ASM9260_TIMER
6 select GENERIC_CLOCKEVENTS
5 help 7 help
6 Support for Alphascale ASM9260 based platform. 8 Support for Alphascale ASM9260 based platform.
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index c6740e359a44..c74a44324e5b 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -64,7 +64,6 @@ config SOC_SAMA5D4
64 select SOC_SAMA5 64 select SOC_SAMA5
65 select CLKSRC_MMIO 65 select CLKSRC_MMIO
66 select CACHE_L2X0 66 select CACHE_L2X0
67 select CACHE_PL310
68 select HAVE_FB_ATMEL 67 select HAVE_FB_ATMEL
69 select HAVE_AT91_UTMI 68 select HAVE_AT91_UTMI
70 select HAVE_AT91_SMD 69 select HAVE_AT91_SMD
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index 51761f8927b7..b00d09555f2b 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -183,7 +183,7 @@ static struct clock_event_device clkevt = {
183void __iomem *at91_st_base; 183void __iomem *at91_st_base;
184EXPORT_SYMBOL_GPL(at91_st_base); 184EXPORT_SYMBOL_GPL(at91_st_base);
185 185
186static struct of_device_id at91rm9200_st_timer_ids[] = { 186static const struct of_device_id at91rm9200_st_timer_ids[] = {
187 { .compatible = "atmel,at91rm9200-st" }, 187 { .compatible = "atmel,at91rm9200-st" },
188 { /* sentinel */ } 188 { /* sentinel */ }
189}; 189};
diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
index a6e726a6e0b5..583369ffc284 100644
--- a/arch/arm/mach-at91/generic.h
+++ b/arch/arm/mach-at91/generic.h
@@ -35,10 +35,10 @@ extern void __init at91sam9260_pm_init(void);
35extern void __init at91sam9g45_pm_init(void); 35extern void __init at91sam9g45_pm_init(void);
36extern void __init at91sam9x5_pm_init(void); 36extern void __init at91sam9x5_pm_init(void);
37#else 37#else
38void __init at91rm9200_pm_init(void) { } 38static inline void __init at91rm9200_pm_init(void) { }
39void __init at91sam9260_pm_init(void) { } 39static inline void __init at91sam9260_pm_init(void) { }
40void __init at91sam9g45_pm_init(void) { } 40static inline void __init at91sam9g45_pm_init(void) { }
41void __init at91sam9x5_pm_init(void) { } 41static inline void __init at91sam9x5_pm_init(void) { }
42#endif 42#endif
43 43
44#endif /* _AT91_GENERIC_H */ 44#endif /* _AT91_GENERIC_H */
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index af8d8afc2e12..5e34fb143309 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -226,7 +226,7 @@ void at91_pm_set_standby(void (*at91_standby)(void))
226 } 226 }
227} 227}
228 228
229static struct of_device_id ramc_ids[] = { 229static const struct of_device_id ramc_ids[] __initconst = {
230 { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby }, 230 { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
231 { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby }, 231 { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
232 { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby }, 232 { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
@@ -234,7 +234,7 @@ static struct of_device_id ramc_ids[] = {
234 { /*sentinel*/ } 234 { /*sentinel*/ }
235}; 235};
236 236
237static void at91_dt_ramc(void) 237static __init void at91_dt_ramc(void)
238{ 238{
239 struct device_node *np; 239 struct device_node *np;
240 const struct of_device_id *of_id; 240 const struct of_device_id *of_id;
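
The at91 hunk pairs two annotations: the table becomes __initconst and the only function walking it becomes __init, so both are discarded after boot, and modpost no longer has reason to flag a section mismatch between init data and regular text. A sketch of the pairing, with hypothetical names:

    #include <linux/init.h>
    #include <linux/of.h>
    #include <linux/printk.h>

    /* Hypothetical boot-time scan: data and code live in init sections
     * together, so neither occupies memory once boot completes. */
    static const struct of_device_id demo_ramc_ids[] __initconst = {
            { .compatible = "vendor,demo-sdramc" },
            { /* sentinel */ }
    };

    static void __init demo_ramc_scan(void)
    {
            struct device_node *np;

            for_each_matching_node(np, demo_ramc_ids)
                    pr_info("ramc: found %s\n", np->full_name);
    }
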
diff --git a/arch/arm/mach-axxia/axxia.c b/arch/arm/mach-axxia/axxia.c
index 19e5a1d95397..4db76a493c5a 100644
--- a/arch/arm/mach-axxia/axxia.c
+++ b/arch/arm/mach-axxia/axxia.c
@@ -16,7 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <asm/mach/arch.h> 17#include <asm/mach/arch.h>
18 18
19static const char *axxia_dt_match[] __initconst = { 19static const char *const axxia_dt_match[] __initconst = {
20 "lsi,axm5516", 20 "lsi,axm5516",
21 "lsi,axm5516-sim", 21 "lsi,axm5516-sim",
22 "lsi,axm5516-emu", 22 "lsi,axm5516-emu",
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index aaeec78c3ec4..8b11f44bb36e 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -68,7 +68,7 @@ config ARCH_BCM_MOBILE
68 This enables support for systems based on Broadcom mobile SoCs. 68 This enables support for systems based on Broadcom mobile SoCs.
69 69
70config ARCH_BCM_281XX 70config ARCH_BCM_281XX
71 bool "Broadcom BCM281XX SoC family" 71 bool "Broadcom BCM281XX SoC family" if ARCH_MULTI_V7
72 select ARCH_BCM_MOBILE 72 select ARCH_BCM_MOBILE
73 select HAVE_SMP 73 select HAVE_SMP
74 help 74 help
@@ -77,7 +77,7 @@ config ARCH_BCM_281XX
77 variants. 77 variants.
78 78
79config ARCH_BCM_21664 79config ARCH_BCM_21664
80 bool "Broadcom BCM21664 SoC family" 80 bool "Broadcom BCM21664 SoC family" if ARCH_MULTI_V7
81 select ARCH_BCM_MOBILE 81 select ARCH_BCM_MOBILE
82 select HAVE_SMP 82 select HAVE_SMP
83 help 83 help
diff --git a/arch/arm/mach-bcm/brcmstb.c b/arch/arm/mach-bcm/brcmstb.c
index 60a5afa06ed7..3a60f7ee3f0c 100644
--- a/arch/arm/mach-bcm/brcmstb.c
+++ b/arch/arm/mach-bcm/brcmstb.c
@@ -17,7 +17,7 @@
17#include <asm/mach-types.h> 17#include <asm/mach-types.h>
18#include <asm/mach/arch.h> 18#include <asm/mach/arch.h>
19 19
20static const char *brcmstb_match[] __initconst = { 20static const char *const brcmstb_match[] __initconst = {
21 "brcm,bcm7445", 21 "brcm,bcm7445",
22 "brcm,brcmstb", 22 "brcm,brcmstb",
23 NULL 23 NULL
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index 584e8d4e2892..cd30f6f5f2ff 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -32,12 +32,14 @@ config ARCH_DAVINCI_DM646x
32 32
33config ARCH_DAVINCI_DA830 33config ARCH_DAVINCI_DA830
34 bool "DA830/OMAP-L137/AM17x based system" 34 bool "DA830/OMAP-L137/AM17x based system"
35 depends on !ARCH_DAVINCI_DMx || AUTO_ZRELADDR
35 select ARCH_DAVINCI_DA8XX 36 select ARCH_DAVINCI_DA8XX
36 select CPU_DCACHE_WRITETHROUGH # needed on silicon revs 1.0, 1.1 37 select CPU_DCACHE_WRITETHROUGH # needed on silicon revs 1.0, 1.1
37 select CP_INTC 38 select CP_INTC
38 39
39config ARCH_DAVINCI_DA850 40config ARCH_DAVINCI_DA850
40 bool "DA850/OMAP-L138/AM18x based system" 41 bool "DA850/OMAP-L138/AM18x based system"
42 depends on !ARCH_DAVINCI_DMx || AUTO_ZRELADDR
41 select ARCH_DAVINCI_DA8XX 43 select ARCH_DAVINCI_DA8XX
42 select CP_INTC 44 select CP_INTC
43 45
diff --git a/arch/arm/mach-davinci/da8xx-dt.c b/arch/arm/mach-davinci/da8xx-dt.c
index f703d82f08a8..438f68547f4c 100644
--- a/arch/arm/mach-davinci/da8xx-dt.c
+++ b/arch/arm/mach-davinci/da8xx-dt.c
@@ -20,7 +20,7 @@
20 20
21#define DA8XX_NUM_UARTS 3 21#define DA8XX_NUM_UARTS 3
22 22
23static struct of_device_id da8xx_irq_match[] __initdata = { 23static const struct of_device_id da8xx_irq_match[] __initconst = {
24 { .compatible = "ti,cp-intc", .data = cp_intc_of_init, }, 24 { .compatible = "ti,cp-intc", .data = cp_intc_of_init, },
25 { } 25 { }
26}; 26};
diff --git a/arch/arm/mach-davinci/mux.c b/arch/arm/mach-davinci/mux.c
index a8eb909a2b6c..6a2ff0a654a5 100644
--- a/arch/arm/mach-davinci/mux.c
+++ b/arch/arm/mach-davinci/mux.c
@@ -30,7 +30,7 @@ static void __iomem *pinmux_base;
30/* 30/*
31 * Sets the DAVINCI MUX register based on the table 31 * Sets the DAVINCI MUX register based on the table
32 */ 32 */
33int __init_or_module davinci_cfg_reg(const unsigned long index) 33int davinci_cfg_reg(const unsigned long index)
34{ 34{
35 static DEFINE_SPINLOCK(mux_spin_lock); 35 static DEFINE_SPINLOCK(mux_spin_lock);
36 struct davinci_soc_info *soc_info = &davinci_soc_info; 36 struct davinci_soc_info *soc_info = &davinci_soc_info;
@@ -101,7 +101,7 @@ int __init_or_module davinci_cfg_reg(const unsigned long index)
101} 101}
102EXPORT_SYMBOL(davinci_cfg_reg); 102EXPORT_SYMBOL(davinci_cfg_reg);
103 103
104int __init_or_module davinci_cfg_reg_list(const short pins[]) 104int davinci_cfg_reg_list(const short pins[])
105{ 105{
106 int i, error = -EINVAL; 106 int i, error = -EINVAL;
107 107
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index 2013f73797ed..9e9dfdfad9d7 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -227,7 +227,7 @@ static void __init exynos_dt_machine_init(void)
227 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 227 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
228} 228}
229 229
230static char const *exynos_dt_compat[] __initconst = { 230static char const *const exynos_dt_compat[] __initconst = {
231 "samsung,exynos3", 231 "samsung,exynos3",
232 "samsung,exynos3250", 232 "samsung,exynos3250",
233 "samsung,exynos4", 233 "samsung,exynos4",
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index 666ec3e5b03f..52e2b1a2fddb 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -587,7 +587,7 @@ static struct exynos_pm_data exynos5420_pm_data = {
587 .cpu_suspend = exynos5420_cpu_suspend, 587 .cpu_suspend = exynos5420_cpu_suspend,
588}; 588};
589 589
590static struct of_device_id exynos_pmu_of_device_ids[] = { 590static const struct of_device_id exynos_pmu_of_device_ids[] __initconst = {
591 { 591 {
592 .compatible = "samsung,exynos3250-pmu", 592 .compatible = "samsung,exynos3250-pmu",
593 .data = &exynos3250_pm_data, 593 .data = &exynos3250_pm_data,
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 07a09570175d..231fba0d03e5 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -169,7 +169,7 @@ static void __init highbank_init(void)
169 platform_device_register(&highbank_cpuidle_device); 169 platform_device_register(&highbank_cpuidle_device);
170} 170}
171 171
172static const char *highbank_match[] __initconst = { 172static const char *const highbank_match[] __initconst = {
173 "calxeda,highbank", 173 "calxeda,highbank",
174 "calxeda,ecx-2000", 174 "calxeda,ecx-2000",
175 NULL, 175 NULL,
diff --git a/arch/arm/mach-hisi/hisilicon.c b/arch/arm/mach-hisi/hisilicon.c
index 76b907078b58..c6bd7c7bd4aa 100644
--- a/arch/arm/mach-hisi/hisilicon.c
+++ b/arch/arm/mach-hisi/hisilicon.c
@@ -45,7 +45,7 @@ static void __init hi3620_map_io(void)
45 iotable_init(hi3620_io_desc, ARRAY_SIZE(hi3620_io_desc)); 45 iotable_init(hi3620_io_desc, ARRAY_SIZE(hi3620_io_desc));
46} 46}
47 47
48static const char *hi3xxx_compat[] __initconst = { 48static const char *const hi3xxx_compat[] __initconst = {
49 "hisilicon,hi3620-hi4511", 49 "hisilicon,hi3620-hi4511",
50 NULL, 50 NULL,
51}; 51};
@@ -55,7 +55,7 @@ DT_MACHINE_START(HI3620, "Hisilicon Hi3620 (Flattened Device Tree)")
55 .dt_compat = hi3xxx_compat, 55 .dt_compat = hi3xxx_compat,
56MACHINE_END 56MACHINE_END
57 57
58static const char *hix5hd2_compat[] __initconst = { 58static const char *const hix5hd2_compat[] __initconst = {
59 "hisilicon,hix5hd2", 59 "hisilicon,hix5hd2",
60 NULL, 60 NULL,
61}; 61};
@@ -64,7 +64,7 @@ DT_MACHINE_START(HIX5HD2_DT, "Hisilicon HIX5HD2 (Flattened Device Tree)")
64 .dt_compat = hix5hd2_compat, 64 .dt_compat = hix5hd2_compat,
65MACHINE_END 65MACHINE_END
66 66
67static const char *hip04_compat[] __initconst = { 67static const char *const hip04_compat[] __initconst = {
68 "hisilicon,hip04-d01", 68 "hisilicon,hip04-d01",
69 NULL, 69 NULL,
70}; 70};
@@ -73,7 +73,7 @@ DT_MACHINE_START(HIP04, "Hisilicon HiP04 (Flattened Device Tree)")
73 .dt_compat = hip04_compat, 73 .dt_compat = hip04_compat,
74MACHINE_END 74MACHINE_END
75 75
76static const char *hip01_compat[] __initconst = { 76static const char *const hip01_compat[] __initconst = {
77 "hisilicon,hip01", 77 "hisilicon,hip01",
78 "hisilicon,hip01-ca9x2", 78 "hisilicon,hip01-ca9x2",
79 NULL, 79 NULL,
diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
index a377f95033ae..0411f0664c15 100644
--- a/arch/arm/mach-imx/mmdc.c
+++ b/arch/arm/mach-imx/mmdc.c
@@ -68,7 +68,7 @@ int imx_mmdc_get_ddr_type(void)
68 return ddr_type; 68 return ddr_type;
69} 69}
70 70
71static struct of_device_id imx_mmdc_dt_ids[] = { 71static const struct of_device_id imx_mmdc_dt_ids[] = {
72 { .compatible = "fsl,imx6q-mmdc", }, 72 { .compatible = "fsl,imx6q-mmdc", },
73 { /* sentinel */ } 73 { /* sentinel */ }
74}; 74};
diff --git a/arch/arm/mach-ixp4xx/include/mach/io.h b/arch/arm/mach-ixp4xx/include/mach/io.h
index 6a722860e34d..b02439019963 100644
--- a/arch/arm/mach-ixp4xx/include/mach/io.h
+++ b/arch/arm/mach-ixp4xx/include/mach/io.h
@@ -245,8 +245,10 @@ static inline void outb(u8 value, u32 addr)
245} 245}
246 246
247#define outsb outsb 247#define outsb outsb
248static inline void outsb(u32 io_addr, const u8 *vaddr, u32 count) 248static inline void outsb(u32 io_addr, const void *p, u32 count)
249{ 249{
250 const u8 *vaddr = p;
251
250 while (count--) 252 while (count--)
251 outb(*vaddr++, io_addr); 253 outb(*vaddr++, io_addr);
252} 254}
@@ -262,8 +264,9 @@ static inline void outw(u16 value, u32 addr)
262} 264}
263 265
264#define outsw outsw 266#define outsw outsw
265static inline void outsw(u32 io_addr, const u16 *vaddr, u32 count) 267static inline void outsw(u32 io_addr, const void *p, u32 count)
266{ 268{
269 const u16 *vaddr = p;
267 while (count--) 270 while (count--)
268 outw(cpu_to_le16(*vaddr++), io_addr); 271 outw(cpu_to_le16(*vaddr++), io_addr);
269} 272}
@@ -275,8 +278,9 @@ static inline void outl(u32 value, u32 addr)
275} 278}
276 279
277#define outsl outsl 280#define outsl outsl
278static inline void outsl(u32 io_addr, const u32 *vaddr, u32 count) 281static inline void outsl(u32 io_addr, const void *p, u32 count)
279{ 282{
283 const u32 *vaddr = p;
280 while (count--) 284 while (count--)
281 outl(cpu_to_le32(*vaddr++), io_addr); 285 outl(cpu_to_le32(*vaddr++), io_addr);
282} 286}
@@ -294,8 +298,9 @@ static inline u8 inb(u32 addr)
294} 298}
295 299
296#define insb insb 300#define insb insb
297static inline void insb(u32 io_addr, u8 *vaddr, u32 count) 301static inline void insb(u32 io_addr, void *p, u32 count)
298{ 302{
303 u8 *vaddr = p;
299 while (count--) 304 while (count--)
300 *vaddr++ = inb(io_addr); 305 *vaddr++ = inb(io_addr);
301} 306}
@@ -313,8 +318,9 @@ static inline u16 inw(u32 addr)
313} 318}
314 319
315#define insw insw 320#define insw insw
316static inline void insw(u32 io_addr, u16 *vaddr, u32 count) 321static inline void insw(u32 io_addr, void *p, u32 count)
317{ 322{
323 u16 *vaddr = p;
318 while (count--) 324 while (count--)
319 *vaddr++ = le16_to_cpu(inw(io_addr)); 325 *vaddr++ = le16_to_cpu(inw(io_addr));
320} 326}
@@ -330,8 +336,9 @@ static inline u32 inl(u32 addr)
330} 336}
331 337
332#define insl insl 338#define insl insl
333static inline void insl(u32 io_addr, u32 *vaddr, u32 count) 339static inline void insl(u32 io_addr, void *p, u32 count)
334{ 340{
341 u32 *vaddr = p;
335 while (count--) 342 while (count--)
336 *vaddr++ = le32_to_cpu(inl(io_addr)); 343 *vaddr++ = le32_to_cpu(inl(io_addr));
337} 344}
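
The point of switching these string-I/O helpers to void * is twofold: the prototypes now line up with the generic insb()/outsb() family used elsewhere, and callers can pass whatever buffer type they hold without casts. A caller-side sketch (the port address and FIFO layout are made up for illustration):

    #include <linux/types.h>

    /* Hypothetical FIFO drain: a u16 payload buffer and a u8 status
     * byte both go straight into the void * based accessors, no casts
     * needed with the prototypes above. */
    static void demo_drain_fifo(u32 io_addr)
    {
            u16 payload[32];
            u8 status;

            insw(io_addr, payload, 32);
            insb(io_addr + 4, &status, 1);
    }
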
diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
index 7f352de26099..06620875813a 100644
--- a/arch/arm/mach-keystone/keystone.c
+++ b/arch/arm/mach-keystone/keystone.c
@@ -103,7 +103,7 @@ static void __init keystone_init_meminfo(void)
103 pr_info("Switching to high address space at 0x%llx\n", (u64)offset); 103 pr_info("Switching to high address space at 0x%llx\n", (u64)offset);
104} 104}
105 105
106static const char *keystone_match[] __initconst = { 106static const char *const keystone_match[] __initconst = {
107 "ti,keystone", 107 "ti,keystone",
108 NULL, 108 NULL,
109}; 109};
diff --git a/arch/arm/mach-keystone/pm_domain.c b/arch/arm/mach-keystone/pm_domain.c
index ef6041e7e675..41bebfd296dc 100644
--- a/arch/arm/mach-keystone/pm_domain.c
+++ b/arch/arm/mach-keystone/pm_domain.c
@@ -61,7 +61,7 @@ static struct pm_clk_notifier_block platform_domain_notifier = {
61 .pm_domain = &keystone_pm_domain, 61 .pm_domain = &keystone_pm_domain,
62}; 62};
63 63
64static struct of_device_id of_keystone_table[] = { 64static const struct of_device_id of_keystone_table[] = {
65 {.compatible = "ti,keystone"}, 65 {.compatible = "ti,keystone"},
66 { /* end of list */ }, 66 { /* end of list */ },
67}; 67};
diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c
index 2756351dbb35..10bfa03e58d4 100644
--- a/arch/arm/mach-mmp/time.c
+++ b/arch/arm/mach-mmp/time.c
@@ -213,7 +213,7 @@ void __init timer_init(int irq)
213} 213}
214 214
215#ifdef CONFIG_OF 215#ifdef CONFIG_OF
216static struct of_device_id mmp_timer_dt_ids[] = { 216static const struct of_device_id mmp_timer_dt_ids[] = {
217 { .compatible = "mrvl,mmp-timer", }, 217 { .compatible = "mrvl,mmp-timer", },
218 {} 218 {}
219}; 219};
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index 61bfe584a9d7..fc832040c6e9 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
@@ -20,6 +20,7 @@
20#include <linux/input.h> 20#include <linux/input.h>
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/smc91x.h>
23 24
24#include <mach/hardware.h> 25#include <mach/hardware.h>
25#include <asm/mach-types.h> 26#include <asm/mach-types.h>
@@ -46,15 +47,20 @@ static struct resource smc91x_resources[] = {
46 [1] = { 47 [1] = {
47 .start = MSM_GPIO_TO_INT(49), 48 .start = MSM_GPIO_TO_INT(49),
48 .end = MSM_GPIO_TO_INT(49), 49 .end = MSM_GPIO_TO_INT(49),
49 .flags = IORESOURCE_IRQ, 50 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
50 }, 51 },
51}; 52};
52 53
54static struct smc91x_platdata smc91x_platdata = {
55 .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
56};
57
53static struct platform_device smc91x_device = { 58static struct platform_device smc91x_device = {
54 .name = "smc91x", 59 .name = "smc91x",
55 .id = 0, 60 .id = 0,
56 .num_resources = ARRAY_SIZE(smc91x_resources), 61 .num_resources = ARRAY_SIZE(smc91x_resources),
57 .resource = smc91x_resources, 62 .resource = smc91x_resources,
63 .dev.platform_data = &smc91x_platdata,
58}; 64};
59 65
60static struct platform_device *devices[] __initdata = { 66static struct platform_device *devices[] __initdata = {
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c
index 4c748616ef47..10016a3bc698 100644
--- a/arch/arm/mach-msm/board-qsd8x50.c
+++ b/arch/arm/mach-msm/board-qsd8x50.c
@@ -22,6 +22,7 @@
22#include <linux/usb/msm_hsusb.h> 22#include <linux/usb/msm_hsusb.h>
23#include <linux/err.h> 23#include <linux/err.h>
24#include <linux/clkdev.h> 24#include <linux/clkdev.h>
25#include <linux/smc91x.h>
25 26
26#include <asm/mach-types.h> 27#include <asm/mach-types.h>
27#include <asm/mach/arch.h> 28#include <asm/mach/arch.h>
@@ -49,15 +50,20 @@ static struct resource smc91x_resources[] = {
49 .flags = IORESOURCE_MEM, 50 .flags = IORESOURCE_MEM,
50 }, 51 },
51 [1] = { 52 [1] = {
52 .flags = IORESOURCE_IRQ, 53 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
53 }, 54 },
54}; 55};
55 56
57static struct smc91x_platdata smc91x_platdata = {
58 .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
59};
60
56static struct platform_device smc91x_device = { 61static struct platform_device smc91x_device = {
57 .name = "smc91x", 62 .name = "smc91x",
58 .id = 0, 63 .id = 0,
59 .num_resources = ARRAY_SIZE(smc91x_resources), 64 .num_resources = ARRAY_SIZE(smc91x_resources),
60 .resource = smc91x_resources, 65 .resource = smc91x_resources,
66 .dev.platform_data = &smc91x_platdata,
61}; 67};
62 68
63static int __init msm_init_smc91x(void) 69static int __init msm_init_smc91x(void)
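
Both board files now hand the smc91x driver an explicit struct smc91x_platdata rather than letting it probe the bus width at runtime, and mark the interrupt as high-level to match the wiring. On the consumer side such data is read via dev_get_platdata(); a minimal sketch (the probe body is illustrative, not the driver's actual code):

    #include <linux/platform_device.h>
    #include <linux/smc91x.h>

    /* Hypothetical consumer: honour the board-provided bus-width flags
     * when present, keeping autodetection as the fallback. */
    static int demo_smc91x_probe(struct platform_device *pdev)
    {
            struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev);

            if (pd && (pd->flags & SMC91X_USE_16BIT))
                    dev_info(&pdev->dev, "board requests 16-bit bus\n");

            return 0;
    }
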
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index b5895f040caa..e46e9ea1e187 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -51,7 +51,7 @@ enum {
51 COHERENCY_FABRIC_TYPE_ARMADA_380, 51 COHERENCY_FABRIC_TYPE_ARMADA_380,
52}; 52};
53 53
54static struct of_device_id of_coherency_table[] = { 54static const struct of_device_id of_coherency_table[] = {
55 {.compatible = "marvell,coherency-fabric", 55 {.compatible = "marvell,coherency-fabric",
56 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP }, 56 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP },
57 {.compatible = "marvell,armada-375-coherency-fabric", 57 {.compatible = "marvell,armada-375-coherency-fabric",
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index d8ab605a44fa..8b9f5e202ccf 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -104,7 +104,7 @@ static void __iomem *pmsu_mp_base;
104 104
105static void *mvebu_cpu_resume; 105static void *mvebu_cpu_resume;
106 106
107static struct of_device_id of_pmsu_table[] = { 107static const struct of_device_id of_pmsu_table[] = {
108 { .compatible = "marvell,armada-370-pmsu", }, 108 { .compatible = "marvell,armada-370-pmsu", },
109 { .compatible = "marvell,armada-370-xp-pmsu", }, 109 { .compatible = "marvell,armada-370-xp-pmsu", },
110 { .compatible = "marvell,armada-380-pmsu", }, 110 { .compatible = "marvell,armada-380-pmsu", },
diff --git a/arch/arm/mach-mvebu/system-controller.c b/arch/arm/mach-mvebu/system-controller.c
index a068cb5c2ce8..c6c132acd7a6 100644
--- a/arch/arm/mach-mvebu/system-controller.c
+++ b/arch/arm/mach-mvebu/system-controller.c
@@ -126,7 +126,7 @@ int mvebu_system_controller_get_soc_id(u32 *dev, u32 *rev)
126 return -ENODEV; 126 return -ENODEV;
127} 127}
128 128
129#ifdef CONFIG_SMP 129#if defined(CONFIG_SMP) && defined(CONFIG_MACH_MVEBU_V7)
130void mvebu_armada375_smp_wa_init(void) 130void mvebu_armada375_smp_wa_init(void)
131{ 131{
132 u32 dev, rev; 132 u32 dev, rev;
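
The guard above grows a CONFIG_MACH_MVEBU_V7 condition because the Armada 375 workaround should only be built when the v7 platform code that consumes it is. As a stylistic aside only: when every symbol referenced in the body exists in all configurations, the same compile-out can be written with IS_ENABLED(), which keeps the branch visible to the compiler; a hedged sketch with a hypothetical name:

    #include <linux/kconfig.h>

    /* Hypothetical variant: the condition folds to a constant, so the
     * body is eliminated at compile time, assuming its callees are
     * always defined. */
    void demo_armada375_smp_wa_init(void)
    {
            if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_MACH_MVEBU_V7))
                    return;

            /* ... apply the shared-L2 SMP workaround here ... */
    }
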
diff --git a/arch/arm/mach-nspire/nspire.c b/arch/arm/mach-nspire/nspire.c
index 3d24ebf12095..3445a5686805 100644
--- a/arch/arm/mach-nspire/nspire.c
+++ b/arch/arm/mach-nspire/nspire.c
@@ -27,7 +27,7 @@
27#include "mmio.h" 27#include "mmio.h"
28#include "clcd.h" 28#include "clcd.h"
29 29
30static const char *nspire_dt_match[] __initconst = { 30static const char *const nspire_dt_match[] __initconst = {
31 "ti,nspire", 31 "ti,nspire",
32 "ti,nspire-cx", 32 "ti,nspire-cx",
33 "ti,nspire-tp", 33 "ti,nspire-tp",
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 00d5d8f9f150..b83f18fcec9b 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -190,7 +190,7 @@ obj-$(CONFIG_SOC_OMAP2430) += clock2430.o
190obj-$(CONFIG_ARCH_OMAP3) += $(clock-common) clock3xxx.o 190obj-$(CONFIG_ARCH_OMAP3) += $(clock-common) clock3xxx.o
191obj-$(CONFIG_ARCH_OMAP3) += clock34xx.o clkt34xx_dpll3m2.o 191obj-$(CONFIG_ARCH_OMAP3) += clock34xx.o clkt34xx_dpll3m2.o
192obj-$(CONFIG_ARCH_OMAP3) += clock3517.o clock36xx.o 192obj-$(CONFIG_ARCH_OMAP3) += clock3517.o clock36xx.o
193obj-$(CONFIG_ARCH_OMAP3) += dpll3xxx.o cclock3xxx_data.o 193obj-$(CONFIG_ARCH_OMAP3) += dpll3xxx.o
194obj-$(CONFIG_ARCH_OMAP3) += clkt_iclk.o 194obj-$(CONFIG_ARCH_OMAP3) += clkt_iclk.o
195obj-$(CONFIG_ARCH_OMAP4) += $(clock-common) 195obj-$(CONFIG_ARCH_OMAP4) += $(clock-common)
196obj-$(CONFIG_ARCH_OMAP4) += dpll3xxx.o dpll44xx.o 196obj-$(CONFIG_ARCH_OMAP4) += dpll3xxx.o dpll44xx.o
diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
deleted file mode 100644
index e79c80bbc755..000000000000
--- a/arch/arm/mach-omap2/cclock3xxx_data.c
+++ /dev/null
@@ -1,3688 +0,0 @@
1/*
2 * OMAP3 clock data
3 *
4 * Copyright (C) 2007-2012 Texas Instruments, Inc.
5 * Copyright (C) 2007-2011 Nokia Corporation
6 *
7 * Written by Paul Walmsley
8 * Updated to COMMON clk data format by Rajendra Nayak <rnayak@ti.com>
9 * With many device clock fixes by Kevin Hilman and Jouni Högander
10 * DPLL bypass clock support added by Roman Tereshonkov
11 *
12 */
13
14/*
15 * Virtual clocks are introduced as convenient tools.
16 * They are sources for other clocks and not supposed
17 * to be requested from drivers directly.
18 */
19
20#include <linux/kernel.h>
21#include <linux/clk.h>
22#include <linux/clk-private.h>
23#include <linux/list.h>
24#include <linux/io.h>
25
26#include "soc.h"
27#include "iomap.h"
28#include "clock.h"
29#include "clock3xxx.h"
30#include "clock34xx.h"
31#include "clock36xx.h"
32#include "clock3517.h"
33#include "cm3xxx.h"
34#include "cm-regbits-34xx.h"
35#include "prm3xxx.h"
36#include "prm-regbits-34xx.h"
37#include "control.h"
38
39/*
40 * clocks
41 */
42
43#define OMAP_CM_REGADDR OMAP34XX_CM_REGADDR
44
45/* Maximum DPLL multiplier, divider values for OMAP3 */
46#define OMAP3_MAX_DPLL_MULT 2047
47#define OMAP3630_MAX_JTYPE_DPLL_MULT 4095
48#define OMAP3_MAX_DPLL_DIV 128
49
50DEFINE_CLK_FIXED_RATE(dummy_apb_pclk, CLK_IS_ROOT, 0x0, 0x0);
51
52DEFINE_CLK_FIXED_RATE(mcbsp_clks, CLK_IS_ROOT, 0x0, 0x0);
53
54DEFINE_CLK_FIXED_RATE(omap_32k_fck, CLK_IS_ROOT, 32768, 0x0);
55
56DEFINE_CLK_FIXED_RATE(pclk_ck, CLK_IS_ROOT, 27000000, 0x0);
57
58DEFINE_CLK_FIXED_RATE(rmii_ck, CLK_IS_ROOT, 50000000, 0x0);
59
60DEFINE_CLK_FIXED_RATE(secure_32k_fck, CLK_IS_ROOT, 32768, 0x0);
61
62DEFINE_CLK_FIXED_RATE(sys_altclk, CLK_IS_ROOT, 0x0, 0x0);
63
64DEFINE_CLK_FIXED_RATE(virt_12m_ck, CLK_IS_ROOT, 12000000, 0x0);
65
66DEFINE_CLK_FIXED_RATE(virt_13m_ck, CLK_IS_ROOT, 13000000, 0x0);
67
68DEFINE_CLK_FIXED_RATE(virt_16_8m_ck, CLK_IS_ROOT, 16800000, 0x0);
69
70DEFINE_CLK_FIXED_RATE(virt_19200000_ck, CLK_IS_ROOT, 19200000, 0x0);
71
72DEFINE_CLK_FIXED_RATE(virt_26000000_ck, CLK_IS_ROOT, 26000000, 0x0);
73
74DEFINE_CLK_FIXED_RATE(virt_38_4m_ck, CLK_IS_ROOT, 38400000, 0x0);
75
76static const char *osc_sys_ck_parent_names[] = {
77 "virt_12m_ck", "virt_13m_ck", "virt_19200000_ck", "virt_26000000_ck",
78 "virt_38_4m_ck", "virt_16_8m_ck",
79};
80
81DEFINE_CLK_MUX(osc_sys_ck, osc_sys_ck_parent_names, NULL, 0x0,
82 OMAP3430_PRM_CLKSEL, OMAP3430_SYS_CLKIN_SEL_SHIFT,
83 OMAP3430_SYS_CLKIN_SEL_WIDTH, 0x0, NULL);
84
85DEFINE_CLK_DIVIDER(sys_ck, "osc_sys_ck", &osc_sys_ck, 0x0,
86 OMAP3430_PRM_CLKSRC_CTRL, OMAP_SYSCLKDIV_SHIFT,
87 OMAP_SYSCLKDIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);
88
89static struct dpll_data dpll3_dd = {
90 .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
91 .mult_mask = OMAP3430_CORE_DPLL_MULT_MASK,
92 .div1_mask = OMAP3430_CORE_DPLL_DIV_MASK,
93 .clk_bypass = &sys_ck,
94 .clk_ref = &sys_ck,
95 .freqsel_mask = OMAP3430_CORE_DPLL_FREQSEL_MASK,
96 .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
97 .enable_mask = OMAP3430_EN_CORE_DPLL_MASK,
98 .auto_recal_bit = OMAP3430_EN_CORE_DPLL_DRIFTGUARD_SHIFT,
99 .recal_en_bit = OMAP3430_CORE_DPLL_RECAL_EN_SHIFT,
100 .recal_st_bit = OMAP3430_CORE_DPLL_ST_SHIFT,
101 .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
102 .autoidle_mask = OMAP3430_AUTO_CORE_DPLL_MASK,
103 .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
104 .idlest_mask = OMAP3430_ST_CORE_CLK_MASK,
105 .max_multiplier = OMAP3_MAX_DPLL_MULT,
106 .min_divider = 1,
107 .max_divider = OMAP3_MAX_DPLL_DIV,
108};
109
110static struct clk dpll3_ck;
111
112static const char *dpll3_ck_parent_names[] = {
113 "sys_ck",
114 "sys_ck",
115};
116
117static const struct clk_ops dpll3_ck_ops = {
118 .init = &omap2_init_clk_clkdm,
119 .get_parent = &omap2_init_dpll_parent,
120 .recalc_rate = &omap3_dpll_recalc,
121 .round_rate = &omap2_dpll_round_rate,
122};

static struct clk_hw_omap dpll3_ck_hw = {
	.hw = {
		.clk = &dpll3_ck,
	},
	.ops = &clkhwops_omap3_dpll,
	.dpll_data = &dpll3_dd,
	.clkdm_name = "dpll3_clkdm",
};

DEFINE_STRUCT_CLK(dpll3_ck, dpll3_ck_parent_names, dpll3_ck_ops);

DEFINE_CLK_DIVIDER(dpll3_m2_ck, "dpll3_ck", &dpll3_ck, 0x0,
	OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
	OMAP3430_CORE_DPLL_CLKOUT_DIV_SHIFT,
	OMAP3430_CORE_DPLL_CLKOUT_DIV_WIDTH,
	CLK_DIVIDER_ONE_BASED, NULL);

static struct clk core_ck;

static const char *core_ck_parent_names[] = {
	"dpll3_m2_ck",
};

static const struct clk_ops core_ck_ops = {};

DEFINE_STRUCT_CLK_HW_OMAP(core_ck, NULL);
DEFINE_STRUCT_CLK(core_ck, core_ck_parent_names, core_ck_ops);

DEFINE_CLK_DIVIDER(l3_ick, "core_ck", &core_ck, 0x0,
	OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
	OMAP3430_CLKSEL_L3_SHIFT, OMAP3430_CLKSEL_L3_WIDTH,
	CLK_DIVIDER_ONE_BASED, NULL);

DEFINE_CLK_DIVIDER(l4_ick, "l3_ick", &l3_ick, 0x0,
	OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
	OMAP3430_CLKSEL_L4_SHIFT, OMAP3430_CLKSEL_L4_WIDTH,
	CLK_DIVIDER_ONE_BASED, NULL);

static struct clk security_l4_ick2;

static const char *security_l4_ick2_parent_names[] = {
	"l4_ick",
};

DEFINE_STRUCT_CLK_HW_OMAP(security_l4_ick2, NULL);
DEFINE_STRUCT_CLK(security_l4_ick2, security_l4_ick2_parent_names, core_ck_ops);

static struct clk aes1_ick;

static const char *aes1_ick_parent_names[] = {
	"security_l4_ick2",
};

static const struct clk_ops aes1_ick_ops = {
	.enable = &omap2_dflt_clk_enable,
	.disable = &omap2_dflt_clk_disable,
	.is_enabled = &omap2_dflt_clk_is_enabled,
};

static struct clk_hw_omap aes1_ick_hw = {
	.hw = {
		.clk = &aes1_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
	.enable_bit = OMAP3430_EN_AES1_SHIFT,
};

DEFINE_STRUCT_CLK(aes1_ick, aes1_ick_parent_names, aes1_ick_ops);

static struct clk core_l4_ick;

static const struct clk_ops core_l4_ick_ops = {
	.init = &omap2_init_clk_clkdm,
};

DEFINE_STRUCT_CLK_HW_OMAP(core_l4_ick, "core_l4_clkdm");
DEFINE_STRUCT_CLK(core_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);

static struct clk aes2_ick;

static const char *aes2_ick_parent_names[] = {
	"core_l4_ick",
};

static const struct clk_ops aes2_ick_ops = {
	.init = &omap2_init_clk_clkdm,
	.enable = &omap2_dflt_clk_enable,
	.disable = &omap2_dflt_clk_disable,
	.is_enabled = &omap2_dflt_clk_is_enabled,
};

static struct clk_hw_omap aes2_ick_hw = {
	.hw = {
		.clk = &aes2_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_AES2_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(aes2_ick, aes2_ick_parent_names, aes2_ick_ops);
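/*
 * clk_ops structs in this file are shared wherever possible and named
 * after their first user, so aes2_ick_ops (clockdomain init plus the
 * default gate ops) reappears under many otherwise unrelated leaf
 * clocks below.
 */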

static struct clk dpll1_fck;

static struct dpll_data dpll1_dd = {
	.mult_div1_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL),
	.mult_mask = OMAP3430_MPU_DPLL_MULT_MASK,
	.div1_mask = OMAP3430_MPU_DPLL_DIV_MASK,
	.clk_bypass = &dpll1_fck,
	.clk_ref = &sys_ck,
	.freqsel_mask = OMAP3430_MPU_DPLL_FREQSEL_MASK,
	.control_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKEN_PLL),
	.enable_mask = OMAP3430_EN_MPU_DPLL_MASK,
	.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	.auto_recal_bit = OMAP3430_EN_MPU_DPLL_DRIFTGUARD_SHIFT,
	.recal_en_bit = OMAP3430_MPU_DPLL_RECAL_EN_SHIFT,
	.recal_st_bit = OMAP3430_MPU_DPLL_ST_SHIFT,
	.autoidle_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_AUTOIDLE_PLL),
	.autoidle_mask = OMAP3430_AUTO_MPU_DPLL_MASK,
	.idlest_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL),
	.idlest_mask = OMAP3430_ST_MPU_CLK_MASK,
	.max_multiplier = OMAP3_MAX_DPLL_MULT,
	.min_divider = 1,
	.max_divider = OMAP3_MAX_DPLL_DIV,
};

static struct clk dpll1_ck;

static const struct clk_ops dpll1_ck_ops = {
	.init = &omap2_init_clk_clkdm,
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
	.round_rate = &omap2_dpll_round_rate,
};

static struct clk_hw_omap dpll1_ck_hw = {
	.hw = {
		.clk = &dpll1_ck,
	},
	.ops = &clkhwops_omap3_dpll,
	.dpll_data = &dpll1_dd,
	.clkdm_name = "dpll1_clkdm",
};

DEFINE_STRUCT_CLK(dpll1_ck, dpll3_ck_parent_names, dpll1_ck_ops);

DEFINE_CLK_FIXED_FACTOR(dpll1_x2_ck, "dpll1_ck", &dpll1_ck, 0x0, 2, 1);

DEFINE_CLK_DIVIDER(dpll1_x2m2_ck, "dpll1_x2_ck", &dpll1_x2_ck, 0x0,
	OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL2_PLL),
	OMAP3430_MPU_DPLL_CLKOUT_DIV_SHIFT,
	OMAP3430_MPU_DPLL_CLKOUT_DIV_WIDTH,
	CLK_DIVIDER_ONE_BASED, NULL);
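/* MPU path: dpll1_ck -> x2 -> M2 divider (dpll1_x2m2_ck) -> mpu_ck -> arm_fck */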

static struct clk mpu_ck;

static const char *mpu_ck_parent_names[] = {
	"dpll1_x2m2_ck",
};

DEFINE_STRUCT_CLK_HW_OMAP(mpu_ck, "mpu_clkdm");
DEFINE_STRUCT_CLK(mpu_ck, mpu_ck_parent_names, core_l4_ick_ops);

DEFINE_CLK_DIVIDER(arm_fck, "mpu_ck", &mpu_ck, 0x0,
	OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL),
	OMAP3430_ST_MPU_CLK_SHIFT, OMAP3430_ST_MPU_CLK_WIDTH,
	0x0, NULL);

static struct clk cam_ick;

static struct clk_hw_omap cam_ick_hw = {
	.hw = {
		.clk = &cam_ick,
	},
	.ops = &clkhwops_iclk,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_ICLKEN),
	.enable_bit = OMAP3430_EN_CAM_SHIFT,
	.clkdm_name = "cam_clkdm",
};

DEFINE_STRUCT_CLK(cam_ick, security_l4_ick2_parent_names, aes2_ick_ops);

/*
 * DPLL4
 * Type: DPLL
 * Supplies 96 MHz, 54 MHz TV DAC, DSS fclk, CAM sensor clock,
 * emul trace clk
 */
static struct dpll_data dpll4_dd;

static struct dpll_data dpll4_dd_34xx __initdata = {
	.mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2),
	.mult_mask = OMAP3430_PERIPH_DPLL_MULT_MASK,
	.div1_mask = OMAP3430_PERIPH_DPLL_DIV_MASK,
	.clk_bypass = &sys_ck,
	.clk_ref = &sys_ck,
	.freqsel_mask = OMAP3430_PERIPH_DPLL_FREQSEL_MASK,
	.control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
	.enable_mask = OMAP3430_EN_PERIPH_DPLL_MASK,
	.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
	.auto_recal_bit = OMAP3430_EN_PERIPH_DPLL_DRIFTGUARD_SHIFT,
	.recal_en_bit = OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT,
	.recal_st_bit = OMAP3430_PERIPH_DPLL_ST_SHIFT,
	.autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
	.autoidle_mask = OMAP3430_AUTO_PERIPH_DPLL_MASK,
	.idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
	.idlest_mask = OMAP3430_ST_PERIPH_CLK_MASK,
	.max_multiplier = OMAP3_MAX_DPLL_MULT,
	.min_divider = 1,
	.max_divider = OMAP3_MAX_DPLL_DIV,
};

static struct dpll_data dpll4_dd_3630 __initdata = {
	.mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2),
	.mult_mask = OMAP3630_PERIPH_DPLL_MULT_MASK,
	.div1_mask = OMAP3430_PERIPH_DPLL_DIV_MASK,
	.clk_bypass = &sys_ck,
	.clk_ref = &sys_ck,
	.control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
	.enable_mask = OMAP3430_EN_PERIPH_DPLL_MASK,
	.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
	.auto_recal_bit = OMAP3430_EN_PERIPH_DPLL_DRIFTGUARD_SHIFT,
	.recal_en_bit = OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT,
	.recal_st_bit = OMAP3430_PERIPH_DPLL_ST_SHIFT,
	.autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
	.autoidle_mask = OMAP3430_AUTO_PERIPH_DPLL_MASK,
	.idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
	.idlest_mask = OMAP3430_ST_PERIPH_CLK_MASK,
	.dco_mask = OMAP3630_PERIPH_DPLL_DCO_SEL_MASK,
	.sddiv_mask = OMAP3630_PERIPH_DPLL_SD_DIV_MASK,
	.max_multiplier = OMAP3630_MAX_JTYPE_DPLL_MULT,
	.min_divider = 1,
	.max_divider = OMAP3_MAX_DPLL_DIV,
	.flags = DPLL_J_TYPE
};

static struct clk dpll4_ck;

static const struct clk_ops dpll4_ck_ops = {
	.init = &omap2_init_clk_clkdm,
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.set_rate = &omap3_dpll4_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_dpll4_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
	.round_rate = &omap2_dpll_round_rate,
};

static struct clk_hw_omap dpll4_ck_hw = {
	.hw = {
		.clk = &dpll4_ck,
	},
	.dpll_data = &dpll4_dd,
	.ops = &clkhwops_omap3_dpll,
	.clkdm_name = "dpll4_clkdm",
};

DEFINE_STRUCT_CLK(dpll4_ck, dpll3_ck_parent_names, dpll4_ck_ops);

static const struct clk_div_table dpll4_mx_ck_div_table[] = {
	{ .div = 1, .val = 1 },
	{ .div = 2, .val = 2 },
	{ .div = 3, .val = 3 },
	{ .div = 4, .val = 4 },
	{ .div = 5, .val = 5 },
	{ .div = 6, .val = 6 },
	{ .div = 7, .val = 7 },
	{ .div = 8, .val = 8 },
	{ .div = 9, .val = 9 },
	{ .div = 10, .val = 10 },
	{ .div = 11, .val = 11 },
	{ .div = 12, .val = 12 },
	{ .div = 13, .val = 13 },
	{ .div = 14, .val = 14 },
	{ .div = 15, .val = 15 },
	{ .div = 16, .val = 16 },
	{ .div = 17, .val = 17 },
	{ .div = 18, .val = 18 },
	{ .div = 19, .val = 19 },
	{ .div = 20, .val = 20 },
	{ .div = 21, .val = 21 },
	{ .div = 22, .val = 22 },
	{ .div = 23, .val = 23 },
	{ .div = 24, .val = 24 },
	{ .div = 25, .val = 25 },
	{ .div = 26, .val = 26 },
	{ .div = 27, .val = 27 },
	{ .div = 28, .val = 28 },
	{ .div = 29, .val = 29 },
	{ .div = 30, .val = 30 },
	{ .div = 31, .val = 31 },
	{ .div = 32, .val = 32 },
	{ .div = 0 },
};
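/*
 * Identity table: val == div for 1..32, terminated by .div = 0. A
 * table (rather than a plain one-based divider) apparently lets the
 * valid divisor range be enumerated explicitly for the wider OMAP36xx
 * CLKSEL fields used with DEFINE_CLK_DIVIDER_TABLE below.
 */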

DEFINE_CLK_DIVIDER(dpll4_m5_ck, "dpll4_ck", &dpll4_ck, 0x0,
	OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_CLKSEL),
	OMAP3430_CLKSEL_CAM_SHIFT, OMAP3630_CLKSEL_CAM_WIDTH,
	CLK_DIVIDER_ONE_BASED, NULL);

static struct clk dpll4_m5x2_ck;

static const char *dpll4_m5x2_ck_parent_names[] = {
	"dpll4_m5_ck",
};

static const struct clk_ops dpll4_m5x2_ck_ops = {
	.init = &omap2_init_clk_clkdm,
	.enable = &omap2_dflt_clk_enable,
	.disable = &omap2_dflt_clk_disable,
	.is_enabled = &omap2_dflt_clk_is_enabled,
	.set_rate = &omap3_clkoutx2_set_rate,
	.recalc_rate = &omap3_clkoutx2_recalc,
	.round_rate = &omap3_clkoutx2_round_rate,
};

static const struct clk_ops dpll4_m5x2_ck_3630_ops = {
	.init = &omap2_init_clk_clkdm,
	.enable = &omap36xx_pwrdn_clk_enable_with_hsdiv_restore,
	.disable = &omap2_dflt_clk_disable,
	.recalc_rate = &omap3_clkoutx2_recalc,
};

static struct clk_hw_omap dpll4_m5x2_ck_hw = {
	.hw = {
		.clk = &dpll4_m5x2_ck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
	.enable_bit = OMAP3430_PWRDN_CAM_SHIFT,
	.flags = INVERT_ENABLE,
	.clkdm_name = "dpll4_clkdm",
};

DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names,
	dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);

static struct clk dpll4_m5x2_ck_3630 = {
	.name = "dpll4_m5x2_ck",
	.hw = &dpll4_m5x2_ck_hw.hw,
	.parent_names = dpll4_m5x2_ck_parent_names,
	.num_parents = ARRAY_SIZE(dpll4_m5x2_ck_parent_names),
	.ops = &dpll4_m5x2_ck_3630_ops,
	.flags = CLK_SET_RATE_PARENT,
};
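/*
 * The *_3630 variants share the 34xx clk_hw but swap in ops whose
 * .enable (omap36xx_pwrdn_clk_enable_with_hsdiv_restore) reprograms
 * the HSDIVIDER after the output comes out of PWRDN, working around
 * an OMAP36xx erratum where the divider value is not retained.
 * INVERT_ENABLE on these gates reflects that the CM bit is a
 * power-down (PWRDN) control, i.e. 0 = enabled.
 */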

static struct clk cam_mclk;

static const char *cam_mclk_parent_names[] = {
	"dpll4_m5x2_ck",
};

static struct clk_hw_omap cam_mclk_hw = {
	.hw = {
		.clk = &cam_mclk,
	},
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430_EN_CAM_SHIFT,
	.clkdm_name = "cam_clkdm",
};

static struct clk cam_mclk = {
	.name = "cam_mclk",
	.hw = &cam_mclk_hw.hw,
	.parent_names = cam_mclk_parent_names,
	.num_parents = ARRAY_SIZE(cam_mclk_parent_names),
	.ops = &aes2_ick_ops,
	.flags = CLK_SET_RATE_PARENT,
};

static const struct clksel_rate clkout2_src_core_rates[] = {
	{ .div = 1, .val = 0, .flags = RATE_IN_3XXX },
	{ .div = 0 }
};

static const struct clksel_rate clkout2_src_sys_rates[] = {
	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
	{ .div = 0 }
};

static const struct clksel_rate clkout2_src_96m_rates[] = {
	{ .div = 1, .val = 2, .flags = RATE_IN_3XXX },
	{ .div = 0 }
};

DEFINE_CLK_DIVIDER(dpll4_m2_ck, "dpll4_ck", &dpll4_ck, 0x0,
	OMAP_CM_REGADDR(PLL_MOD, OMAP3430_CM_CLKSEL3),
	OMAP3430_DIV_96M_SHIFT, OMAP3630_DIV_96M_WIDTH,
	CLK_DIVIDER_ONE_BASED, NULL);

static struct clk dpll4_m2x2_ck;

static const char *dpll4_m2x2_ck_parent_names[] = {
	"dpll4_m2_ck",
};

static struct clk_hw_omap dpll4_m2x2_ck_hw = {
	.hw = {
		.clk = &dpll4_m2x2_ck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
	.enable_bit = OMAP3430_PWRDN_96M_SHIFT,
	.flags = INVERT_ENABLE,
	.clkdm_name = "dpll4_clkdm",
};

DEFINE_STRUCT_CLK(dpll4_m2x2_ck, dpll4_m2x2_ck_parent_names, dpll4_m5x2_ck_ops);

static struct clk dpll4_m2x2_ck_3630 = {
	.name = "dpll4_m2x2_ck",
	.hw = &dpll4_m2x2_ck_hw.hw,
	.parent_names = dpll4_m2x2_ck_parent_names,
	.num_parents = ARRAY_SIZE(dpll4_m2x2_ck_parent_names),
	.ops = &dpll4_m5x2_ck_3630_ops,
};

static struct clk omap_96m_alwon_fck;

static const char *omap_96m_alwon_fck_parent_names[] = {
	"dpll4_m2x2_ck",
};

DEFINE_STRUCT_CLK_HW_OMAP(omap_96m_alwon_fck, NULL);
DEFINE_STRUCT_CLK(omap_96m_alwon_fck, omap_96m_alwon_fck_parent_names,
	core_ck_ops);

static struct clk cm_96m_fck;

static const char *cm_96m_fck_parent_names[] = {
	"omap_96m_alwon_fck",
};

DEFINE_STRUCT_CLK_HW_OMAP(cm_96m_fck, NULL);
DEFINE_STRUCT_CLK(cm_96m_fck, cm_96m_fck_parent_names, core_ck_ops);

static const struct clksel_rate clkout2_src_54m_rates[] = {
	{ .div = 1, .val = 3, .flags = RATE_IN_3XXX },
	{ .div = 0 }
};

DEFINE_CLK_DIVIDER_TABLE(dpll4_m3_ck, "dpll4_ck", &dpll4_ck, 0x0,
	OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
	OMAP3430_CLKSEL_TV_SHIFT, OMAP3630_CLKSEL_TV_WIDTH,
	0, dpll4_mx_ck_div_table, NULL);

static struct clk dpll4_m3x2_ck;

static const char *dpll4_m3x2_ck_parent_names[] = {
	"dpll4_m3_ck",
};

static struct clk_hw_omap dpll4_m3x2_ck_hw = {
	.hw = {
		.clk = &dpll4_m3x2_ck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
	.enable_bit = OMAP3430_PWRDN_TV_SHIFT,
	.flags = INVERT_ENABLE,
	.clkdm_name = "dpll4_clkdm",
};

DEFINE_STRUCT_CLK(dpll4_m3x2_ck, dpll4_m3x2_ck_parent_names, dpll4_m5x2_ck_ops);

static struct clk dpll4_m3x2_ck_3630 = {
	.name = "dpll4_m3x2_ck",
	.hw = &dpll4_m3x2_ck_hw.hw,
	.parent_names = dpll4_m3x2_ck_parent_names,
	.num_parents = ARRAY_SIZE(dpll4_m3x2_ck_parent_names),
	.ops = &dpll4_m5x2_ck_3630_ops,
};

static const char *omap_54m_fck_parent_names[] = {
	"dpll4_m3x2_ck", "sys_altclk",
};

DEFINE_CLK_MUX(omap_54m_fck, omap_54m_fck_parent_names, NULL, 0x0,
	OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), OMAP3430_SOURCE_54M_SHIFT,
	OMAP3430_SOURCE_54M_WIDTH, 0x0, NULL);

static const struct clksel clkout2_src_clksel[] = {
	{ .parent = &core_ck, .rates = clkout2_src_core_rates },
	{ .parent = &sys_ck, .rates = clkout2_src_sys_rates },
	{ .parent = &cm_96m_fck, .rates = clkout2_src_96m_rates },
	{ .parent = &omap_54m_fck, .rates = clkout2_src_54m_rates },
	{ .parent = NULL },
};

static const char *clkout2_src_ck_parent_names[] = {
	"core_ck", "sys_ck", "cm_96m_fck", "omap_54m_fck",
};

static const struct clk_ops clkout2_src_ck_ops = {
	.init = &omap2_init_clk_clkdm,
	.enable = &omap2_dflt_clk_enable,
	.disable = &omap2_dflt_clk_disable,
	.is_enabled = &omap2_dflt_clk_is_enabled,
	.recalc_rate = &omap2_clksel_recalc,
	.get_parent = &omap2_clksel_find_parent_index,
	.set_parent = &omap2_clksel_set_parent,
};

DEFINE_CLK_OMAP_MUX_GATE(clkout2_src_ck, "core_clkdm",
	clkout2_src_clksel, OMAP3430_CM_CLKOUT_CTRL,
	OMAP3430_CLKOUT2SOURCE_MASK,
	OMAP3430_CM_CLKOUT_CTRL, OMAP3430_CLKOUT2_EN_SHIFT,
	NULL, clkout2_src_ck_parent_names, clkout2_src_ck_ops);
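/*
 * sys_clkout2 source: the same CM_CLKOUT_CTRL register both selects
 * the parent (core/sys/96M/54M via the clksel tables above) and gates
 * the output through CLKOUT2_EN, so one register serves as mux and
 * gate.
 */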

static const struct clksel_rate omap_48m_cm96m_rates[] = {
	{ .div = 2, .val = 0, .flags = RATE_IN_3XXX },
	{ .div = 0 }
};

static const struct clksel_rate omap_48m_alt_rates[] = {
	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
	{ .div = 0 }
};

static const struct clksel omap_48m_clksel[] = {
	{ .parent = &cm_96m_fck, .rates = omap_48m_cm96m_rates },
	{ .parent = &sys_altclk, .rates = omap_48m_alt_rates },
	{ .parent = NULL },
};

static const char *omap_48m_fck_parent_names[] = {
	"cm_96m_fck", "sys_altclk",
};

static struct clk omap_48m_fck;

static const struct clk_ops omap_48m_fck_ops = {
	.recalc_rate = &omap2_clksel_recalc,
	.get_parent = &omap2_clksel_find_parent_index,
	.set_parent = &omap2_clksel_set_parent,
};

static struct clk_hw_omap omap_48m_fck_hw = {
	.hw = {
		.clk = &omap_48m_fck,
	},
	.clksel = omap_48m_clksel,
	.clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
	.clksel_mask = OMAP3430_SOURCE_48M_MASK,
};

DEFINE_STRUCT_CLK(omap_48m_fck, omap_48m_fck_parent_names, omap_48m_fck_ops);

DEFINE_CLK_FIXED_FACTOR(omap_12m_fck, "omap_48m_fck", &omap_48m_fck, 0x0, 1, 4);

static struct clk core_12m_fck;

static const char *core_12m_fck_parent_names[] = {
	"omap_12m_fck",
};

DEFINE_STRUCT_CLK_HW_OMAP(core_12m_fck, "core_l4_clkdm");
DEFINE_STRUCT_CLK(core_12m_fck, core_12m_fck_parent_names, core_l4_ick_ops);

static struct clk core_48m_fck;

static const char *core_48m_fck_parent_names[] = {
	"omap_48m_fck",
};

DEFINE_STRUCT_CLK_HW_OMAP(core_48m_fck, "core_l4_clkdm");
DEFINE_STRUCT_CLK(core_48m_fck, core_48m_fck_parent_names, core_l4_ick_ops);

static const char *omap_96m_fck_parent_names[] = {
	"cm_96m_fck", "sys_ck",
};

DEFINE_CLK_MUX(omap_96m_fck, omap_96m_fck_parent_names, NULL, 0x0,
	OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
	OMAP3430_SOURCE_96M_SHIFT, OMAP3430_SOURCE_96M_WIDTH, 0x0, NULL);

static struct clk core_96m_fck;

static const char *core_96m_fck_parent_names[] = {
	"omap_96m_fck",
};

DEFINE_STRUCT_CLK_HW_OMAP(core_96m_fck, "core_l4_clkdm");
DEFINE_STRUCT_CLK(core_96m_fck, core_96m_fck_parent_names, core_l4_ick_ops);

static struct clk core_l3_ick;

static const char *core_l3_ick_parent_names[] = {
	"l3_ick",
};

DEFINE_STRUCT_CLK_HW_OMAP(core_l3_ick, "core_l3_clkdm");
DEFINE_STRUCT_CLK(core_l3_ick, core_l3_ick_parent_names, core_l4_ick_ops);

DEFINE_CLK_FIXED_FACTOR(dpll3_m2x2_ck, "dpll3_m2_ck", &dpll3_m2_ck, 0x0, 2, 1);

static struct clk corex2_fck;

static const char *corex2_fck_parent_names[] = {
	"dpll3_m2x2_ck",
};

DEFINE_STRUCT_CLK_HW_OMAP(corex2_fck, NULL);
DEFINE_STRUCT_CLK(corex2_fck, corex2_fck_parent_names, core_ck_ops);

static const char *cpefuse_fck_parent_names[] = {
	"sys_ck",
};

static struct clk cpefuse_fck;

static struct clk_hw_omap cpefuse_fck_hw = {
	.hw = {
		.clk = &cpefuse_fck,
	},
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
	.enable_bit = OMAP3430ES2_EN_CPEFUSE_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(cpefuse_fck, cpefuse_fck_parent_names, aes2_ick_ops);

static struct clk csi2_96m_fck;

static const char *csi2_96m_fck_parent_names[] = {
	"core_96m_fck",
};

static struct clk_hw_omap csi2_96m_fck_hw = {
	.hw = {
		.clk = &csi2_96m_fck,
	},
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430_EN_CSI2_SHIFT,
	.clkdm_name = "cam_clkdm",
};

DEFINE_STRUCT_CLK(csi2_96m_fck, csi2_96m_fck_parent_names, aes2_ick_ops);

static struct clk d2d_26m_fck;

static struct clk_hw_omap d2d_26m_fck_hw = {
	.hw = {
		.clk = &d2d_26m_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
	.enable_bit = OMAP3430ES1_EN_D2D_SHIFT,
	.clkdm_name = "d2d_clkdm",
};

DEFINE_STRUCT_CLK(d2d_26m_fck, cpefuse_fck_parent_names, aes2_ick_ops);

static struct clk des1_ick;

static struct clk_hw_omap des1_ick_hw = {
	.hw = {
		.clk = &des1_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
	.enable_bit = OMAP3430_EN_DES1_SHIFT,
};

DEFINE_STRUCT_CLK(des1_ick, aes1_ick_parent_names, aes1_ick_ops);

static struct clk des2_ick;

static struct clk_hw_omap des2_ick_hw = {
	.hw = {
		.clk = &des2_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_DES2_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(des2_ick, aes2_ick_parent_names, aes2_ick_ops);

DEFINE_CLK_DIVIDER(dpll1_fck, "core_ck", &core_ck, 0x0,
	OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL),
	OMAP3430_MPU_CLK_SRC_SHIFT, OMAP3430_MPU_CLK_SRC_WIDTH,
	CLK_DIVIDER_ONE_BASED, NULL);

static struct clk dpll2_fck;

static struct dpll_data dpll2_dd = {
	.mult_div1_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL),
	.mult_mask = OMAP3430_IVA2_DPLL_MULT_MASK,
	.div1_mask = OMAP3430_IVA2_DPLL_DIV_MASK,
	.clk_bypass = &dpll2_fck,
	.clk_ref = &sys_ck,
	.freqsel_mask = OMAP3430_IVA2_DPLL_FREQSEL_MASK,
	.control_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL),
	.enable_mask = OMAP3430_EN_IVA2_DPLL_MASK,
	.modes = ((1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED) |
		(1 << DPLL_LOW_POWER_BYPASS)),
	.auto_recal_bit = OMAP3430_EN_IVA2_DPLL_DRIFTGUARD_SHIFT,
	.recal_en_bit = OMAP3430_PRM_IRQENABLE_MPU_IVA2_DPLL_RECAL_EN_SHIFT,
	.recal_st_bit = OMAP3430_PRM_IRQSTATUS_MPU_IVA2_DPLL_ST_SHIFT,
	.autoidle_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL),
	.autoidle_mask = OMAP3430_AUTO_IVA2_DPLL_MASK,
	.idlest_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_IDLEST_PLL),
	.idlest_mask = OMAP3430_ST_IVA2_CLK_MASK,
	.max_multiplier = OMAP3_MAX_DPLL_MULT,
	.min_divider = 1,
	.max_divider = OMAP3_MAX_DPLL_DIV,
};

static struct clk dpll2_ck;

static struct clk_hw_omap dpll2_ck_hw = {
	.hw = {
		.clk = &dpll2_ck,
	},
	.ops = &clkhwops_omap3_dpll,
	.dpll_data = &dpll2_dd,
	.clkdm_name = "dpll2_clkdm",
};

DEFINE_STRUCT_CLK(dpll2_ck, dpll3_ck_parent_names, dpll1_ck_ops);

DEFINE_CLK_DIVIDER(dpll2_fck, "core_ck", &core_ck, 0x0,
	OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL),
	OMAP3430_IVA2_CLK_SRC_SHIFT, OMAP3430_IVA2_CLK_SRC_WIDTH,
	CLK_DIVIDER_ONE_BASED, NULL);

DEFINE_CLK_DIVIDER(dpll2_m2_ck, "dpll2_ck", &dpll2_ck, 0x0,
	OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL2_PLL),
	OMAP3430_IVA2_DPLL_CLKOUT_DIV_SHIFT,
	OMAP3430_IVA2_DPLL_CLKOUT_DIV_WIDTH,
	CLK_DIVIDER_ONE_BASED, NULL);

DEFINE_CLK_DIVIDER(dpll3_m3_ck, "dpll3_ck", &dpll3_ck, 0x0,
	OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
	OMAP3430_DIV_DPLL3_SHIFT, OMAP3430_DIV_DPLL3_WIDTH,
	CLK_DIVIDER_ONE_BASED, NULL);

static struct clk dpll3_m3x2_ck;

static const char *dpll3_m3x2_ck_parent_names[] = {
	"dpll3_m3_ck",
};

static struct clk_hw_omap dpll3_m3x2_ck_hw = {
	.hw = {
		.clk = &dpll3_m3x2_ck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
	.enable_bit = OMAP3430_PWRDN_EMU_CORE_SHIFT,
	.flags = INVERT_ENABLE,
	.clkdm_name = "dpll3_clkdm",
};

DEFINE_STRUCT_CLK(dpll3_m3x2_ck, dpll3_m3x2_ck_parent_names, dpll4_m5x2_ck_ops);

static struct clk dpll3_m3x2_ck_3630 = {
	.name = "dpll3_m3x2_ck",
	.hw = &dpll3_m3x2_ck_hw.hw,
	.parent_names = dpll3_m3x2_ck_parent_names,
	.num_parents = ARRAY_SIZE(dpll3_m3x2_ck_parent_names),
	.ops = &dpll4_m5x2_ck_3630_ops,
};

DEFINE_CLK_FIXED_FACTOR(dpll3_x2_ck, "dpll3_ck", &dpll3_ck, 0x0, 2, 1);

DEFINE_CLK_DIVIDER_TABLE(dpll4_m4_ck, "dpll4_ck", &dpll4_ck, 0x0,
	OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
	OMAP3430_CLKSEL_DSS1_SHIFT, OMAP3630_CLKSEL_DSS1_WIDTH,
	0, dpll4_mx_ck_div_table, NULL);

static struct clk dpll4_m4x2_ck;

static const char *dpll4_m4x2_ck_parent_names[] = {
	"dpll4_m4_ck",
};

static struct clk_hw_omap dpll4_m4x2_ck_hw = {
	.hw = {
		.clk = &dpll4_m4x2_ck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
	.enable_bit = OMAP3430_PWRDN_DSS1_SHIFT,
	.flags = INVERT_ENABLE,
	.clkdm_name = "dpll4_clkdm",
};

DEFINE_STRUCT_CLK_FLAGS(dpll4_m4x2_ck, dpll4_m4x2_ck_parent_names,
	dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);

static struct clk dpll4_m4x2_ck_3630 = {
	.name = "dpll4_m4x2_ck",
	.hw = &dpll4_m4x2_ck_hw.hw,
	.parent_names = dpll4_m4x2_ck_parent_names,
	.num_parents = ARRAY_SIZE(dpll4_m4x2_ck_parent_names),
	.ops = &dpll4_m5x2_ck_3630_ops,
	.flags = CLK_SET_RATE_PARENT,
};

DEFINE_CLK_DIVIDER(dpll4_m6_ck, "dpll4_ck", &dpll4_ck, 0x0,
	OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
	OMAP3430_DIV_DPLL4_SHIFT, OMAP3630_DIV_DPLL4_WIDTH,
	CLK_DIVIDER_ONE_BASED, NULL);

static struct clk dpll4_m6x2_ck;

static const char *dpll4_m6x2_ck_parent_names[] = {
	"dpll4_m6_ck",
};

static struct clk_hw_omap dpll4_m6x2_ck_hw = {
	.hw = {
		.clk = &dpll4_m6x2_ck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
	.enable_bit = OMAP3430_PWRDN_EMU_PERIPH_SHIFT,
	.flags = INVERT_ENABLE,
	.clkdm_name = "dpll4_clkdm",
};

DEFINE_STRUCT_CLK(dpll4_m6x2_ck, dpll4_m6x2_ck_parent_names, dpll4_m5x2_ck_ops);

static struct clk dpll4_m6x2_ck_3630 = {
	.name = "dpll4_m6x2_ck",
	.hw = &dpll4_m6x2_ck_hw.hw,
	.parent_names = dpll4_m6x2_ck_parent_names,
	.num_parents = ARRAY_SIZE(dpll4_m6x2_ck_parent_names),
	.ops = &dpll4_m5x2_ck_3630_ops,
};

DEFINE_CLK_FIXED_FACTOR(dpll4_x2_ck, "dpll4_ck", &dpll4_ck, 0x0, 2, 1);

static struct dpll_data dpll5_dd = {
	.mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL4),
	.mult_mask = OMAP3430ES2_PERIPH2_DPLL_MULT_MASK,
	.div1_mask = OMAP3430ES2_PERIPH2_DPLL_DIV_MASK,
	.clk_bypass = &sys_ck,
	.clk_ref = &sys_ck,
	.freqsel_mask = OMAP3430ES2_PERIPH2_DPLL_FREQSEL_MASK,
	.control_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKEN2),
	.enable_mask = OMAP3430ES2_EN_PERIPH2_DPLL_MASK,
	.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
	.auto_recal_bit = OMAP3430ES2_EN_PERIPH2_DPLL_DRIFTGUARD_SHIFT,
	.recal_en_bit = OMAP3430ES2_SND_PERIPH_DPLL_RECAL_EN_SHIFT,
	.recal_st_bit = OMAP3430ES2_SND_PERIPH_DPLL_ST_SHIFT,
	.autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_AUTOIDLE2_PLL),
	.autoidle_mask = OMAP3430ES2_AUTO_PERIPH2_DPLL_MASK,
	.idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST2),
	.idlest_mask = OMAP3430ES2_ST_PERIPH2_CLK_MASK,
	.max_multiplier = OMAP3_MAX_DPLL_MULT,
	.min_divider = 1,
	.max_divider = OMAP3_MAX_DPLL_DIV,
};

static struct clk dpll5_ck;

static struct clk_hw_omap dpll5_ck_hw = {
	.hw = {
		.clk = &dpll5_ck,
	},
	.ops = &clkhwops_omap3_dpll,
	.dpll_data = &dpll5_dd,
	.clkdm_name = "dpll5_clkdm",
};

DEFINE_STRUCT_CLK(dpll5_ck, dpll3_ck_parent_names, dpll1_ck_ops);

DEFINE_CLK_DIVIDER(dpll5_m2_ck, "dpll5_ck", &dpll5_ck, 0x0,
	OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL5),
	OMAP3430ES2_DIV_120M_SHIFT, OMAP3430ES2_DIV_120M_WIDTH,
	CLK_DIVIDER_ONE_BASED, NULL);
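/*
 * DPLL5 (OMAP3430 ES2+, per the ES2 register names above) feeds the
 * high-speed USB host/TLL blocks; dpll5_m2_ck is its M2 output, named
 * DIV_120M after the nominal 120 MHz rate it is run at.
 */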

static struct clk dss1_alwon_fck_3430es1;

static const char *dss1_alwon_fck_3430es1_parent_names[] = {
	"dpll4_m4x2_ck",
};

static struct clk_hw_omap dss1_alwon_fck_3430es1_hw = {
	.hw = {
		.clk = &dss1_alwon_fck_3430es1,
	},
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430_EN_DSS1_SHIFT,
	.clkdm_name = "dss_clkdm",
};

DEFINE_STRUCT_CLK_FLAGS(dss1_alwon_fck_3430es1,
	dss1_alwon_fck_3430es1_parent_names, aes2_ick_ops,
	CLK_SET_RATE_PARENT);

static struct clk dss1_alwon_fck_3430es2;

static struct clk_hw_omap dss1_alwon_fck_3430es2_hw = {
	.hw = {
		.clk = &dss1_alwon_fck_3430es2,
	},
	.ops = &clkhwops_omap3430es2_dss_usbhost_wait,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430_EN_DSS1_SHIFT,
	.clkdm_name = "dss_clkdm",
};

DEFINE_STRUCT_CLK_FLAGS(dss1_alwon_fck_3430es2,
	dss1_alwon_fck_3430es1_parent_names, aes2_ick_ops,
	CLK_SET_RATE_PARENT);

static struct clk dss2_alwon_fck;

static struct clk_hw_omap dss2_alwon_fck_hw = {
	.hw = {
		.clk = &dss2_alwon_fck,
	},
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430_EN_DSS2_SHIFT,
	.clkdm_name = "dss_clkdm",
};

DEFINE_STRUCT_CLK(dss2_alwon_fck, cpefuse_fck_parent_names, aes2_ick_ops);

static struct clk dss_96m_fck;

static struct clk_hw_omap dss_96m_fck_hw = {
	.hw = {
		.clk = &dss_96m_fck,
	},
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430_EN_TV_SHIFT,
	.clkdm_name = "dss_clkdm",
};

DEFINE_STRUCT_CLK(dss_96m_fck, core_96m_fck_parent_names, aes2_ick_ops);

static struct clk dss_ick_3430es1;

static struct clk_hw_omap dss_ick_3430es1_hw = {
	.hw = {
		.clk = &dss_ick_3430es1,
	},
	.ops = &clkhwops_iclk,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
	.enable_bit = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
	.clkdm_name = "dss_clkdm",
};

DEFINE_STRUCT_CLK(dss_ick_3430es1, security_l4_ick2_parent_names, aes2_ick_ops);

static struct clk dss_ick_3430es2;

static struct clk_hw_omap dss_ick_3430es2_hw = {
	.hw = {
		.clk = &dss_ick_3430es2,
	},
	.ops = &clkhwops_omap3430es2_iclk_dss_usbhost_wait,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
	.enable_bit = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
	.clkdm_name = "dss_clkdm",
};

DEFINE_STRUCT_CLK(dss_ick_3430es2, security_l4_ick2_parent_names, aes2_ick_ops);

static struct clk dss_tv_fck;

static const char *dss_tv_fck_parent_names[] = {
	"omap_54m_fck",
};

static struct clk_hw_omap dss_tv_fck_hw = {
	.hw = {
		.clk = &dss_tv_fck,
	},
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430_EN_TV_SHIFT,
	.clkdm_name = "dss_clkdm",
};

DEFINE_STRUCT_CLK(dss_tv_fck, dss_tv_fck_parent_names, aes2_ick_ops);

static struct clk emac_fck;

static const char *emac_fck_parent_names[] = {
	"rmii_ck",
};

static struct clk_hw_omap emac_fck_hw = {
	.hw = {
		.clk = &emac_fck,
	},
	.enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
	.enable_bit = AM35XX_CPGMAC_FCLK_SHIFT,
};

DEFINE_STRUCT_CLK(emac_fck, emac_fck_parent_names, aes1_ick_ops);
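/*
 * AM35xx IPSS peripherals (EMAC, HECC, USB OTG) are clock-gated
 * through the System Control Module register
 * AM35XX_CONTROL_IPSS_CLK_CTRL rather than the CM, with module
 * handshaking handled by the clkhwops_am35xx_ipss_* ops below.
 */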

static struct clk ipss_ick;

static const char *ipss_ick_parent_names[] = {
	"core_l3_ick",
};

static struct clk_hw_omap ipss_ick_hw = {
	.hw = {
		.clk = &ipss_ick,
	},
	.ops = &clkhwops_am35xx_ipss_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = AM35XX_EN_IPSS_SHIFT,
	.clkdm_name = "core_l3_clkdm",
};

DEFINE_STRUCT_CLK(ipss_ick, ipss_ick_parent_names, aes2_ick_ops);

static struct clk emac_ick;

static const char *emac_ick_parent_names[] = {
	"ipss_ick",
};

static struct clk_hw_omap emac_ick_hw = {
	.hw = {
		.clk = &emac_ick,
	},
	.ops = &clkhwops_am35xx_ipss_module_wait,
	.enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
	.enable_bit = AM35XX_CPGMAC_VBUSP_CLK_SHIFT,
	.clkdm_name = "core_l3_clkdm",
};

DEFINE_STRUCT_CLK(emac_ick, emac_ick_parent_names, aes2_ick_ops);

static struct clk emu_core_alwon_ck;

static const char *emu_core_alwon_ck_parent_names[] = {
	"dpll3_m3x2_ck",
};

DEFINE_STRUCT_CLK_HW_OMAP(emu_core_alwon_ck, "dpll3_clkdm");
DEFINE_STRUCT_CLK(emu_core_alwon_ck, emu_core_alwon_ck_parent_names,
	core_l4_ick_ops);

static struct clk emu_mpu_alwon_ck;

static const char *emu_mpu_alwon_ck_parent_names[] = {
	"mpu_ck",
};

DEFINE_STRUCT_CLK_HW_OMAP(emu_mpu_alwon_ck, NULL);
DEFINE_STRUCT_CLK(emu_mpu_alwon_ck, emu_mpu_alwon_ck_parent_names, core_ck_ops);

static struct clk emu_per_alwon_ck;

static const char *emu_per_alwon_ck_parent_names[] = {
	"dpll4_m6x2_ck",
};

DEFINE_STRUCT_CLK_HW_OMAP(emu_per_alwon_ck, "dpll4_clkdm");
DEFINE_STRUCT_CLK(emu_per_alwon_ck, emu_per_alwon_ck_parent_names,
	core_l4_ick_ops);

static const char *emu_src_ck_parent_names[] = {
	"sys_ck", "emu_core_alwon_ck", "emu_per_alwon_ck", "emu_mpu_alwon_ck",
};

static const struct clksel_rate emu_src_sys_rates[] = {
	{ .div = 1, .val = 0, .flags = RATE_IN_3XXX },
	{ .div = 0 },
};

static const struct clksel_rate emu_src_core_rates[] = {
	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
	{ .div = 0 },
};

static const struct clksel_rate emu_src_per_rates[] = {
	{ .div = 1, .val = 2, .flags = RATE_IN_3XXX },
	{ .div = 0 },
};

static const struct clksel_rate emu_src_mpu_rates[] = {
	{ .div = 1, .val = 3, .flags = RATE_IN_3XXX },
	{ .div = 0 },
};

static const struct clksel emu_src_clksel[] = {
	{ .parent = &sys_ck, .rates = emu_src_sys_rates },
	{ .parent = &emu_core_alwon_ck, .rates = emu_src_core_rates },
	{ .parent = &emu_per_alwon_ck, .rates = emu_src_per_rates },
	{ .parent = &emu_mpu_alwon_ck, .rates = emu_src_mpu_rates },
	{ .parent = NULL },
};

static const struct clk_ops emu_src_ck_ops = {
	.init = &omap2_init_clk_clkdm,
	.recalc_rate = &omap2_clksel_recalc,
	.get_parent = &omap2_clksel_find_parent_index,
	.set_parent = &omap2_clksel_set_parent,
	.enable = &omap2_clkops_enable_clkdm,
	.disable = &omap2_clkops_disable_clkdm,
};

static struct clk emu_src_ck;

static struct clk_hw_omap emu_src_ck_hw = {
	.hw = {
		.clk = &emu_src_ck,
	},
	.clksel = emu_src_clksel,
	.clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
	.clksel_mask = OMAP3430_MUX_CTRL_MASK,
	.clkdm_name = "emu_clkdm",
};

DEFINE_STRUCT_CLK(emu_src_ck, emu_src_ck_parent_names, emu_src_ck_ops);

DEFINE_CLK_DIVIDER(atclk_fck, "emu_src_ck", &emu_src_ck, 0x0,
	OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
	OMAP3430_CLKSEL_ATCLK_SHIFT, OMAP3430_CLKSEL_ATCLK_WIDTH,
	CLK_DIVIDER_ONE_BASED, NULL);

static struct clk fac_ick;

static struct clk_hw_omap fac_ick_hw = {
	.hw = {
		.clk = &fac_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430ES1_EN_FAC_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(fac_ick, aes2_ick_parent_names, aes2_ick_ops);

static struct clk fshostusb_fck;

static const char *fshostusb_fck_parent_names[] = {
	"core_48m_fck",
};

static struct clk_hw_omap fshostusb_fck_hw = {
	.hw = {
		.clk = &fshostusb_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
	.enable_bit = OMAP3430ES1_EN_FSHOSTUSB_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(fshostusb_fck, fshostusb_fck_parent_names, aes2_ick_ops);

static struct clk gfx_l3_ck;

static struct clk_hw_omap gfx_l3_ck_hw = {
	.hw = {
		.clk = &gfx_l3_ck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_ICLKEN),
	.enable_bit = OMAP_EN_GFX_SHIFT,
	.clkdm_name = "gfx_3430es1_clkdm",
};

DEFINE_STRUCT_CLK(gfx_l3_ck, core_l3_ick_parent_names, aes1_ick_ops);

DEFINE_CLK_DIVIDER(gfx_l3_fck, "l3_ick", &l3_ick, 0x0,
	OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
	OMAP_CLKSEL_GFX_SHIFT, OMAP_CLKSEL_GFX_WIDTH,
	CLK_DIVIDER_ONE_BASED, NULL);

static struct clk gfx_cg1_ck;

static const char *gfx_cg1_ck_parent_names[] = {
	"gfx_l3_fck",
};

static struct clk_hw_omap gfx_cg1_ck_hw = {
	.hw = {
		.clk = &gfx_cg1_ck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430ES1_EN_2D_SHIFT,
	.clkdm_name = "gfx_3430es1_clkdm",
};

DEFINE_STRUCT_CLK(gfx_cg1_ck, gfx_cg1_ck_parent_names, aes2_ick_ops);

static struct clk gfx_cg2_ck;

static struct clk_hw_omap gfx_cg2_ck_hw = {
	.hw = {
		.clk = &gfx_cg2_ck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430ES1_EN_3D_SHIFT,
	.clkdm_name = "gfx_3430es1_clkdm",
};

DEFINE_STRUCT_CLK(gfx_cg2_ck, gfx_cg1_ck_parent_names, aes2_ick_ops);

static struct clk gfx_l3_ick;

static const char *gfx_l3_ick_parent_names[] = {
	"gfx_l3_ck",
};

DEFINE_STRUCT_CLK_HW_OMAP(gfx_l3_ick, "gfx_3430es1_clkdm");
DEFINE_STRUCT_CLK(gfx_l3_ick, gfx_l3_ick_parent_names, core_l4_ick_ops);

static struct clk wkup_32k_fck;

static const char *wkup_32k_fck_parent_names[] = {
	"omap_32k_fck",
};

DEFINE_STRUCT_CLK_HW_OMAP(wkup_32k_fck, "wkup_clkdm");
DEFINE_STRUCT_CLK(wkup_32k_fck, wkup_32k_fck_parent_names, core_l4_ick_ops);

static struct clk gpio1_dbck;

static const char *gpio1_dbck_parent_names[] = {
	"wkup_32k_fck",
};

static struct clk_hw_omap gpio1_dbck_hw = {
	.hw = {
		.clk = &gpio1_dbck,
	},
	.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430_EN_GPIO1_SHIFT,
	.clkdm_name = "wkup_clkdm",
};

DEFINE_STRUCT_CLK(gpio1_dbck, gpio1_dbck_parent_names, aes2_ick_ops);

static struct clk wkup_l4_ick;

DEFINE_STRUCT_CLK_HW_OMAP(wkup_l4_ick, "wkup_clkdm");
DEFINE_STRUCT_CLK(wkup_l4_ick, cpefuse_fck_parent_names, core_l4_ick_ops);

static struct clk gpio1_ick;

static const char *gpio1_ick_parent_names[] = {
	"wkup_l4_ick",
};

static struct clk_hw_omap gpio1_ick_hw = {
	.hw = {
		.clk = &gpio1_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
	.enable_bit = OMAP3430_EN_GPIO1_SHIFT,
	.clkdm_name = "wkup_clkdm",
};

DEFINE_STRUCT_CLK(gpio1_ick, gpio1_ick_parent_names, aes2_ick_ops);

static struct clk per_32k_alwon_fck;

DEFINE_STRUCT_CLK_HW_OMAP(per_32k_alwon_fck, "per_clkdm");
DEFINE_STRUCT_CLK(per_32k_alwon_fck, wkup_32k_fck_parent_names,
	core_l4_ick_ops);

static struct clk gpio2_dbck;

static const char *gpio2_dbck_parent_names[] = {
	"per_32k_alwon_fck",
};

static struct clk_hw_omap gpio2_dbck_hw = {
	.hw = {
		.clk = &gpio2_dbck,
	},
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430_EN_GPIO2_SHIFT,
	.clkdm_name = "per_clkdm",
};

DEFINE_STRUCT_CLK(gpio2_dbck, gpio2_dbck_parent_names, aes2_ick_ops);

static struct clk per_l4_ick;

DEFINE_STRUCT_CLK_HW_OMAP(per_l4_ick, "per_clkdm");
DEFINE_STRUCT_CLK(per_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);

static struct clk gpio2_ick;

static const char *gpio2_ick_parent_names[] = {
	"per_l4_ick",
};

static struct clk_hw_omap gpio2_ick_hw = {
	.hw = {
		.clk = &gpio2_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
	.enable_bit = OMAP3430_EN_GPIO2_SHIFT,
	.clkdm_name = "per_clkdm",
};

DEFINE_STRUCT_CLK(gpio2_ick, gpio2_ick_parent_names, aes2_ick_ops);

static struct clk gpio3_dbck;

static struct clk_hw_omap gpio3_dbck_hw = {
	.hw = {
		.clk = &gpio3_dbck,
	},
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430_EN_GPIO3_SHIFT,
	.clkdm_name = "per_clkdm",
};

DEFINE_STRUCT_CLK(gpio3_dbck, gpio2_dbck_parent_names, aes2_ick_ops);

static struct clk gpio3_ick;

static struct clk_hw_omap gpio3_ick_hw = {
	.hw = {
		.clk = &gpio3_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
	.enable_bit = OMAP3430_EN_GPIO3_SHIFT,
	.clkdm_name = "per_clkdm",
};

DEFINE_STRUCT_CLK(gpio3_ick, gpio2_ick_parent_names, aes2_ick_ops);

static struct clk gpio4_dbck;

static struct clk_hw_omap gpio4_dbck_hw = {
	.hw = {
		.clk = &gpio4_dbck,
	},
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430_EN_GPIO4_SHIFT,
	.clkdm_name = "per_clkdm",
};

DEFINE_STRUCT_CLK(gpio4_dbck, gpio2_dbck_parent_names, aes2_ick_ops);

static struct clk gpio4_ick;

static struct clk_hw_omap gpio4_ick_hw = {
	.hw = {
		.clk = &gpio4_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
	.enable_bit = OMAP3430_EN_GPIO4_SHIFT,
	.clkdm_name = "per_clkdm",
};

DEFINE_STRUCT_CLK(gpio4_ick, gpio2_ick_parent_names, aes2_ick_ops);

static struct clk gpio5_dbck;

static struct clk_hw_omap gpio5_dbck_hw = {
	.hw = {
		.clk = &gpio5_dbck,
	},
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430_EN_GPIO5_SHIFT,
	.clkdm_name = "per_clkdm",
};

DEFINE_STRUCT_CLK(gpio5_dbck, gpio2_dbck_parent_names, aes2_ick_ops);

static struct clk gpio5_ick;

static struct clk_hw_omap gpio5_ick_hw = {
	.hw = {
		.clk = &gpio5_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
	.enable_bit = OMAP3430_EN_GPIO5_SHIFT,
	.clkdm_name = "per_clkdm",
};

DEFINE_STRUCT_CLK(gpio5_ick, gpio2_ick_parent_names, aes2_ick_ops);

static struct clk gpio6_dbck;

static struct clk_hw_omap gpio6_dbck_hw = {
	.hw = {
		.clk = &gpio6_dbck,
	},
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430_EN_GPIO6_SHIFT,
	.clkdm_name = "per_clkdm",
};

DEFINE_STRUCT_CLK(gpio6_dbck, gpio2_dbck_parent_names, aes2_ick_ops);

static struct clk gpio6_ick;

static struct clk_hw_omap gpio6_ick_hw = {
	.hw = {
		.clk = &gpio6_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
	.enable_bit = OMAP3430_EN_GPIO6_SHIFT,
	.clkdm_name = "per_clkdm",
};

DEFINE_STRUCT_CLK(gpio6_ick, gpio2_ick_parent_names, aes2_ick_ops);

static struct clk gpmc_fck;

static struct clk_hw_omap gpmc_fck_hw = {
	.hw = {
		.clk = &gpmc_fck,
	},
	.flags = ENABLE_ON_INIT,
	.clkdm_name = "core_l3_clkdm",
};

DEFINE_STRUCT_CLK(gpmc_fck, ipss_ick_parent_names, core_l4_ick_ops);

static const struct clksel omap343x_gpt_clksel[] = {
	{ .parent = &omap_32k_fck, .rates = gpt_32k_rates },
	{ .parent = &sys_ck, .rates = gpt_sys_rates },
	{ .parent = NULL },
};

static const char *gpt10_fck_parent_names[] = {
	"omap_32k_fck", "sys_ck",
};

DEFINE_CLK_OMAP_MUX_GATE(gpt10_fck, "core_l4_clkdm", omap343x_gpt_clksel,
	OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
	OMAP3430_CLKSEL_GPT10_MASK,
	OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
	OMAP3430_EN_GPT10_SHIFT, &clkhwops_wait,
	gpt10_fck_parent_names, clkout2_src_ck_ops);
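/*
 * The GPTn functional clocks (gpt10_fck above, gpt1..gpt11 below) are
 * all built the same way: CM_CLKSEL picks 32 kHz or sys_ck
 * (omap343x_gpt_clksel) and CM_FCLKEN gates the timer; they share
 * gpt10_fck_parent_names and the generic clksel ops.
 */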
1578
1579static struct clk gpt10_ick;
1580
1581static struct clk_hw_omap gpt10_ick_hw = {
1582 .hw = {
1583 .clk = &gpt10_ick,
1584 },
1585 .ops = &clkhwops_iclk_wait,
1586 .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
1587 .enable_bit = OMAP3430_EN_GPT10_SHIFT,
1588 .clkdm_name = "core_l4_clkdm",
1589};
1590
1591DEFINE_STRUCT_CLK(gpt10_ick, aes2_ick_parent_names, aes2_ick_ops);
1592
1593DEFINE_CLK_OMAP_MUX_GATE(gpt11_fck, "core_l4_clkdm", omap343x_gpt_clksel,
1594 OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
1595 OMAP3430_CLKSEL_GPT11_MASK,
1596 OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
1597 OMAP3430_EN_GPT11_SHIFT, &clkhwops_wait,
1598 gpt10_fck_parent_names, clkout2_src_ck_ops);
1599
1600static struct clk gpt11_ick;
1601
1602static struct clk_hw_omap gpt11_ick_hw = {
1603 .hw = {
1604 .clk = &gpt11_ick,
1605 },
1606 .ops = &clkhwops_iclk_wait,
1607 .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
1608 .enable_bit = OMAP3430_EN_GPT11_SHIFT,
1609 .clkdm_name = "core_l4_clkdm",
1610};
1611
1612DEFINE_STRUCT_CLK(gpt11_ick, aes2_ick_parent_names, aes2_ick_ops);
1613
1614static struct clk gpt12_fck;
1615
1616static const char *gpt12_fck_parent_names[] = {
1617 "secure_32k_fck",
1618};
1619
1620DEFINE_STRUCT_CLK_HW_OMAP(gpt12_fck, "wkup_clkdm");
1621DEFINE_STRUCT_CLK(gpt12_fck, gpt12_fck_parent_names, core_l4_ick_ops);
1622
1623static struct clk gpt12_ick;
1624
1625static struct clk_hw_omap gpt12_ick_hw = {
1626 .hw = {
1627 .clk = &gpt12_ick,
1628 },
1629 .ops = &clkhwops_iclk_wait,
1630 .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
1631 .enable_bit = OMAP3430_EN_GPT12_SHIFT,
1632 .clkdm_name = "wkup_clkdm",
1633};
1634
1635DEFINE_STRUCT_CLK(gpt12_ick, gpio1_ick_parent_names, aes2_ick_ops);
1636
1637DEFINE_CLK_OMAP_MUX_GATE(gpt1_fck, "wkup_clkdm", omap343x_gpt_clksel,
1638 OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
1639 OMAP3430_CLKSEL_GPT1_MASK,
1640 OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
1641 OMAP3430_EN_GPT1_SHIFT, &clkhwops_wait,
1642 gpt10_fck_parent_names, clkout2_src_ck_ops);
1643
1644static struct clk gpt1_ick;
1645
1646static struct clk_hw_omap gpt1_ick_hw = {
1647 .hw = {
1648 .clk = &gpt1_ick,
1649 },
1650 .ops = &clkhwops_iclk_wait,
1651 .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
1652 .enable_bit = OMAP3430_EN_GPT1_SHIFT,
1653 .clkdm_name = "wkup_clkdm",
1654};
1655
1656DEFINE_STRUCT_CLK(gpt1_ick, gpio1_ick_parent_names, aes2_ick_ops);
1657
1658DEFINE_CLK_OMAP_MUX_GATE(gpt2_fck, "per_clkdm", omap343x_gpt_clksel,
1659 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
1660 OMAP3430_CLKSEL_GPT2_MASK,
1661 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
1662 OMAP3430_EN_GPT2_SHIFT, &clkhwops_wait,
1663 gpt10_fck_parent_names, clkout2_src_ck_ops);
1664
1665static struct clk gpt2_ick;
1666
1667static struct clk_hw_omap gpt2_ick_hw = {
1668 .hw = {
1669 .clk = &gpt2_ick,
1670 },
1671 .ops = &clkhwops_iclk_wait,
1672 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
1673 .enable_bit = OMAP3430_EN_GPT2_SHIFT,
1674 .clkdm_name = "per_clkdm",
1675};
1676
1677DEFINE_STRUCT_CLK(gpt2_ick, gpio2_ick_parent_names, aes2_ick_ops);
1678
1679DEFINE_CLK_OMAP_MUX_GATE(gpt3_fck, "per_clkdm", omap343x_gpt_clksel,
1680 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
1681 OMAP3430_CLKSEL_GPT3_MASK,
1682 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
1683 OMAP3430_EN_GPT3_SHIFT, &clkhwops_wait,
1684 gpt10_fck_parent_names, clkout2_src_ck_ops);
1685
1686static struct clk gpt3_ick;
1687
1688static struct clk_hw_omap gpt3_ick_hw = {
1689 .hw = {
1690 .clk = &gpt3_ick,
1691 },
1692 .ops = &clkhwops_iclk_wait,
1693 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
1694 .enable_bit = OMAP3430_EN_GPT3_SHIFT,
1695 .clkdm_name = "per_clkdm",
1696};
1697
1698DEFINE_STRUCT_CLK(gpt3_ick, gpio2_ick_parent_names, aes2_ick_ops);
1699
1700DEFINE_CLK_OMAP_MUX_GATE(gpt4_fck, "per_clkdm", omap343x_gpt_clksel,
1701 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
1702 OMAP3430_CLKSEL_GPT4_MASK,
1703 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
1704 OMAP3430_EN_GPT4_SHIFT, &clkhwops_wait,
1705 gpt10_fck_parent_names, clkout2_src_ck_ops);
1706
1707static struct clk gpt4_ick;
1708
1709static struct clk_hw_omap gpt4_ick_hw = {
1710 .hw = {
1711 .clk = &gpt4_ick,
1712 },
1713 .ops = &clkhwops_iclk_wait,
1714 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
1715 .enable_bit = OMAP3430_EN_GPT4_SHIFT,
1716 .clkdm_name = "per_clkdm",
1717};
1718
1719DEFINE_STRUCT_CLK(gpt4_ick, gpio2_ick_parent_names, aes2_ick_ops);
1720
1721DEFINE_CLK_OMAP_MUX_GATE(gpt5_fck, "per_clkdm", omap343x_gpt_clksel,
1722 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
1723 OMAP3430_CLKSEL_GPT5_MASK,
1724 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
1725 OMAP3430_EN_GPT5_SHIFT, &clkhwops_wait,
1726 gpt10_fck_parent_names, clkout2_src_ck_ops);
1727
1728static struct clk gpt5_ick;
1729
1730static struct clk_hw_omap gpt5_ick_hw = {
1731 .hw = {
1732 .clk = &gpt5_ick,
1733 },
1734 .ops = &clkhwops_iclk_wait,
1735 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
1736 .enable_bit = OMAP3430_EN_GPT5_SHIFT,
1737 .clkdm_name = "per_clkdm",
1738};
1739
1740DEFINE_STRUCT_CLK(gpt5_ick, gpio2_ick_parent_names, aes2_ick_ops);
1741
1742DEFINE_CLK_OMAP_MUX_GATE(gpt6_fck, "per_clkdm", omap343x_gpt_clksel,
1743 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
1744 OMAP3430_CLKSEL_GPT6_MASK,
1745 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
1746 OMAP3430_EN_GPT6_SHIFT, &clkhwops_wait,
1747 gpt10_fck_parent_names, clkout2_src_ck_ops);
1748
1749static struct clk gpt6_ick;
1750
1751static struct clk_hw_omap gpt6_ick_hw = {
1752 .hw = {
1753 .clk = &gpt6_ick,
1754 },
1755 .ops = &clkhwops_iclk_wait,
1756 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
1757 .enable_bit = OMAP3430_EN_GPT6_SHIFT,
1758 .clkdm_name = "per_clkdm",
1759};
1760
1761DEFINE_STRUCT_CLK(gpt6_ick, gpio2_ick_parent_names, aes2_ick_ops);
1762
1763DEFINE_CLK_OMAP_MUX_GATE(gpt7_fck, "per_clkdm", omap343x_gpt_clksel,
1764 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
1765 OMAP3430_CLKSEL_GPT7_MASK,
1766 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
1767 OMAP3430_EN_GPT7_SHIFT, &clkhwops_wait,
1768 gpt10_fck_parent_names, clkout2_src_ck_ops);
1769
1770static struct clk gpt7_ick;
1771
1772static struct clk_hw_omap gpt7_ick_hw = {
1773 .hw = {
1774 .clk = &gpt7_ick,
1775 },
1776 .ops = &clkhwops_iclk_wait,
1777 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
1778 .enable_bit = OMAP3430_EN_GPT7_SHIFT,
1779 .clkdm_name = "per_clkdm",
1780};
1781
1782DEFINE_STRUCT_CLK(gpt7_ick, gpio2_ick_parent_names, aes2_ick_ops);
1783
1784DEFINE_CLK_OMAP_MUX_GATE(gpt8_fck, "per_clkdm", omap343x_gpt_clksel,
1785 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
1786 OMAP3430_CLKSEL_GPT8_MASK,
1787 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
1788 OMAP3430_EN_GPT8_SHIFT, &clkhwops_wait,
1789 gpt10_fck_parent_names, clkout2_src_ck_ops);
1790
1791static struct clk gpt8_ick;
1792
1793static struct clk_hw_omap gpt8_ick_hw = {
1794 .hw = {
1795 .clk = &gpt8_ick,
1796 },
1797 .ops = &clkhwops_iclk_wait,
1798 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
1799 .enable_bit = OMAP3430_EN_GPT8_SHIFT,
1800 .clkdm_name = "per_clkdm",
1801};
1802
1803DEFINE_STRUCT_CLK(gpt8_ick, gpio2_ick_parent_names, aes2_ick_ops);
1804
1805DEFINE_CLK_OMAP_MUX_GATE(gpt9_fck, "per_clkdm", omap343x_gpt_clksel,
1806 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
1807 OMAP3430_CLKSEL_GPT9_MASK,
1808 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
1809 OMAP3430_EN_GPT9_SHIFT, &clkhwops_wait,
1810 gpt10_fck_parent_names, clkout2_src_ck_ops);
1811
1812static struct clk gpt9_ick;
1813
1814static struct clk_hw_omap gpt9_ick_hw = {
1815 .hw = {
1816 .clk = &gpt9_ick,
1817 },
1818 .ops = &clkhwops_iclk_wait,
1819 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
1820 .enable_bit = OMAP3430_EN_GPT9_SHIFT,
1821 .clkdm_name = "per_clkdm",
1822};
1823
1824DEFINE_STRUCT_CLK(gpt9_ick, gpio2_ick_parent_names, aes2_ick_ops);
1825
1826static struct clk hdq_fck;
1827
1828static const char *hdq_fck_parent_names[] = {
1829 "core_12m_fck",
1830};
1831
1832static struct clk_hw_omap hdq_fck_hw = {
1833 .hw = {
1834 .clk = &hdq_fck,
1835 },
1836 .ops = &clkhwops_wait,
1837 .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
1838 .enable_bit = OMAP3430_EN_HDQ_SHIFT,
1839 .clkdm_name = "core_l4_clkdm",
1840};
1841
1842DEFINE_STRUCT_CLK(hdq_fck, hdq_fck_parent_names, aes2_ick_ops);
1843
1844static struct clk hdq_ick;
1845
1846static struct clk_hw_omap hdq_ick_hw = {
1847 .hw = {
1848 .clk = &hdq_ick,
1849 },
1850 .ops = &clkhwops_iclk_wait,
1851 .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
1852 .enable_bit = OMAP3430_EN_HDQ_SHIFT,
1853 .clkdm_name = "core_l4_clkdm",
1854};
1855
DEFINE_STRUCT_CLK(hdq_ick, aes2_ick_parent_names, aes2_ick_ops);

static struct clk hecc_ck;

static struct clk_hw_omap hecc_ck_hw = {
	.hw = {
		.clk = &hecc_ck,
	},
	.ops = &clkhwops_am35xx_ipss_module_wait,
	.enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
	.enable_bit = AM35XX_HECC_VBUSP_CLK_SHIFT,
	.clkdm_name = "core_l3_clkdm",
};

DEFINE_STRUCT_CLK(hecc_ck, cpefuse_fck_parent_names, aes2_ick_ops);

static struct clk hsotgusb_fck_am35xx;

static struct clk_hw_omap hsotgusb_fck_am35xx_hw = {
	.hw = {
		.clk = &hsotgusb_fck_am35xx,
	},
	.enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
	.enable_bit = AM35XX_USBOTG_FCLK_SHIFT,
	.clkdm_name = "core_l3_clkdm",
};

DEFINE_STRUCT_CLK(hsotgusb_fck_am35xx, cpefuse_fck_parent_names, aes2_ick_ops);

static struct clk hsotgusb_ick_3430es1;

static struct clk_hw_omap hsotgusb_ick_3430es1_hw = {
	.hw = {
		.clk = &hsotgusb_ick_3430es1,
	},
	.ops = &clkhwops_iclk,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_HSOTGUSB_SHIFT,
	.clkdm_name = "core_l3_clkdm",
};

DEFINE_STRUCT_CLK(hsotgusb_ick_3430es1, ipss_ick_parent_names, aes2_ick_ops);

static struct clk hsotgusb_ick_3430es2;

static struct clk_hw_omap hsotgusb_ick_3430es2_hw = {
	.hw = {
		.clk = &hsotgusb_ick_3430es2,
	},
	.ops = &clkhwops_omap3430es2_iclk_hsotgusb_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_HSOTGUSB_SHIFT,
	.clkdm_name = "core_l3_clkdm",
};

DEFINE_STRUCT_CLK(hsotgusb_ick_3430es2, ipss_ick_parent_names, aes2_ick_ops);

static struct clk hsotgusb_ick_am35xx;

static struct clk_hw_omap hsotgusb_ick_am35xx_hw = {
	.hw = {
		.clk = &hsotgusb_ick_am35xx,
	},
	.ops = &clkhwops_am35xx_ipss_module_wait,
	.enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
	.enable_bit = AM35XX_USBOTG_VBUSP_CLK_SHIFT,
	.clkdm_name = "core_l3_clkdm",
};

DEFINE_STRUCT_CLK(hsotgusb_ick_am35xx, emac_ick_parent_names, aes2_ick_ops);

static struct clk i2c1_fck;

static struct clk_hw_omap i2c1_fck_hw = {
	.hw = {
		.clk = &i2c1_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
	.enable_bit = OMAP3430_EN_I2C1_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(i2c1_fck, csi2_96m_fck_parent_names, aes2_ick_ops);

static struct clk i2c1_ick;

static struct clk_hw_omap i2c1_ick_hw = {
	.hw = {
		.clk = &i2c1_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_I2C1_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(i2c1_ick, aes2_ick_parent_names, aes2_ick_ops);

static struct clk i2c2_fck;

static struct clk_hw_omap i2c2_fck_hw = {
	.hw = {
		.clk = &i2c2_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
	.enable_bit = OMAP3430_EN_I2C2_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(i2c2_fck, csi2_96m_fck_parent_names, aes2_ick_ops);

static struct clk i2c2_ick;

static struct clk_hw_omap i2c2_ick_hw = {
	.hw = {
		.clk = &i2c2_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_I2C2_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(i2c2_ick, aes2_ick_parent_names, aes2_ick_ops);

static struct clk i2c3_fck;

static struct clk_hw_omap i2c3_fck_hw = {
	.hw = {
		.clk = &i2c3_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
	.enable_bit = OMAP3430_EN_I2C3_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(i2c3_fck, csi2_96m_fck_parent_names, aes2_ick_ops);

static struct clk i2c3_ick;

static struct clk_hw_omap i2c3_ick_hw = {
	.hw = {
		.clk = &i2c3_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_I2C3_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(i2c3_ick, aes2_ick_parent_names, aes2_ick_ops);

static struct clk icr_ick;

static struct clk_hw_omap icr_ick_hw = {
	.hw = {
		.clk = &icr_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_ICR_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(icr_ick, aes2_ick_parent_names, aes2_ick_ops);

static struct clk iva2_ck;

static const char *iva2_ck_parent_names[] = {
	"dpll2_m2_ck",
};

static struct clk_hw_omap iva2_ck_hw = {
	.hw = {
		.clk = &iva2_ck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
	.clkdm_name = "iva2_clkdm",
};

DEFINE_STRUCT_CLK(iva2_ck, iva2_ck_parent_names, aes2_ick_ops);

static struct clk mad2d_ick;

static struct clk_hw_omap mad2d_ick_hw = {
	.hw = {
		.clk = &mad2d_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
	.enable_bit = OMAP3430_EN_MAD2D_SHIFT,
	.clkdm_name = "d2d_clkdm",
};

DEFINE_STRUCT_CLK(mad2d_ick, core_l3_ick_parent_names, aes2_ick_ops);

static struct clk mailboxes_ick;

static struct clk_hw_omap mailboxes_ick_hw = {
	.hw = {
		.clk = &mailboxes_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_MAILBOXES_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(mailboxes_ick, aes2_ick_parent_names, aes2_ick_ops);

static const struct clksel_rate common_mcbsp_96m_rates[] = {
	{ .div = 1, .val = 0, .flags = RATE_IN_3XXX },
	{ .div = 0 }
};

static const struct clksel_rate common_mcbsp_mcbsp_rates[] = {
	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
	{ .div = 0 }
};

static const struct clksel mcbsp_15_clksel[] = {
	{ .parent = &core_96m_fck, .rates = common_mcbsp_96m_rates },
	{ .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
	{ .parent = NULL },
};

static const char *mcbsp1_fck_parent_names[] = {
	"core_96m_fck", "mcbsp_clks",
};

DEFINE_CLK_OMAP_MUX_GATE(mcbsp1_fck, "core_l4_clkdm", mcbsp_15_clksel,
			 OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
			 OMAP2_MCBSP1_CLKS_MASK,
			 OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
			 OMAP3430_EN_MCBSP1_SHIFT, &clkhwops_wait,
			 mcbsp1_fck_parent_names, clkout2_src_ck_ops);
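
/*
 * Roughly speaking (the literal macro expansion lives in clock.h), a
 * DEFINE_CLK_OMAP_MUX_GATE() entry like mcbsp1_fck above fuses a parent
 * mux and a gate into one clock: the CLKS field in CONTROL_DEVCONF0
 * (masked by OMAP2_MCBSP1_CLKS_MASK) selects between core_96m_fck and
 * the external mcbsp_clks input per mcbsp_15_clksel, while EN_MCBSP1 in
 * CM_FCLKEN1 gates the result, with clkhwops_wait polling until the
 * module is accessible after enable.
 */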

static struct clk mcbsp1_ick;

static struct clk_hw_omap mcbsp1_ick_hw = {
	.hw = {
		.clk = &mcbsp1_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_MCBSP1_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(mcbsp1_ick, aes2_ick_parent_names, aes2_ick_ops);

static struct clk per_96m_fck;

DEFINE_STRUCT_CLK_HW_OMAP(per_96m_fck, "per_clkdm");
DEFINE_STRUCT_CLK(per_96m_fck, cm_96m_fck_parent_names, core_l4_ick_ops);

static const struct clksel mcbsp_234_clksel[] = {
	{ .parent = &per_96m_fck, .rates = common_mcbsp_96m_rates },
	{ .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
	{ .parent = NULL },
};

static const char *mcbsp2_fck_parent_names[] = {
	"per_96m_fck", "mcbsp_clks",
};

DEFINE_CLK_OMAP_MUX_GATE(mcbsp2_fck, "per_clkdm", mcbsp_234_clksel,
			 OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
			 OMAP2_MCBSP2_CLKS_MASK,
			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
			 OMAP3430_EN_MCBSP2_SHIFT, &clkhwops_wait,
			 mcbsp2_fck_parent_names, clkout2_src_ck_ops);

static struct clk mcbsp2_ick;

static struct clk_hw_omap mcbsp2_ick_hw = {
	.hw = {
		.clk = &mcbsp2_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
	.enable_bit = OMAP3430_EN_MCBSP2_SHIFT,
	.clkdm_name = "per_clkdm",
};

DEFINE_STRUCT_CLK(mcbsp2_ick, gpio2_ick_parent_names, aes2_ick_ops);

DEFINE_CLK_OMAP_MUX_GATE(mcbsp3_fck, "per_clkdm", mcbsp_234_clksel,
			 OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
			 OMAP2_MCBSP3_CLKS_MASK,
			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
			 OMAP3430_EN_MCBSP3_SHIFT, &clkhwops_wait,
			 mcbsp2_fck_parent_names, clkout2_src_ck_ops);

static struct clk mcbsp3_ick;

static struct clk_hw_omap mcbsp3_ick_hw = {
	.hw = {
		.clk = &mcbsp3_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
	.enable_bit = OMAP3430_EN_MCBSP3_SHIFT,
	.clkdm_name = "per_clkdm",
};

DEFINE_STRUCT_CLK(mcbsp3_ick, gpio2_ick_parent_names, aes2_ick_ops);

DEFINE_CLK_OMAP_MUX_GATE(mcbsp4_fck, "per_clkdm", mcbsp_234_clksel,
			 OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
			 OMAP2_MCBSP4_CLKS_MASK,
			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
			 OMAP3430_EN_MCBSP4_SHIFT, &clkhwops_wait,
			 mcbsp2_fck_parent_names, clkout2_src_ck_ops);

static struct clk mcbsp4_ick;

static struct clk_hw_omap mcbsp4_ick_hw = {
	.hw = {
		.clk = &mcbsp4_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
	.enable_bit = OMAP3430_EN_MCBSP4_SHIFT,
	.clkdm_name = "per_clkdm",
};

DEFINE_STRUCT_CLK(mcbsp4_ick, gpio2_ick_parent_names, aes2_ick_ops);

DEFINE_CLK_OMAP_MUX_GATE(mcbsp5_fck, "core_l4_clkdm", mcbsp_15_clksel,
			 OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
			 OMAP2_MCBSP5_CLKS_MASK,
			 OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
			 OMAP3430_EN_MCBSP5_SHIFT, &clkhwops_wait,
			 mcbsp1_fck_parent_names, clkout2_src_ck_ops);

static struct clk mcbsp5_ick;

static struct clk_hw_omap mcbsp5_ick_hw = {
	.hw = {
		.clk = &mcbsp5_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_MCBSP5_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(mcbsp5_ick, aes2_ick_parent_names, aes2_ick_ops);

static struct clk mcspi1_fck;

static struct clk_hw_omap mcspi1_fck_hw = {
	.hw = {
		.clk = &mcspi1_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
	.enable_bit = OMAP3430_EN_MCSPI1_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(mcspi1_fck, fshostusb_fck_parent_names, aes2_ick_ops);

static struct clk mcspi1_ick;

static struct clk_hw_omap mcspi1_ick_hw = {
	.hw = {
		.clk = &mcspi1_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_MCSPI1_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(mcspi1_ick, aes2_ick_parent_names, aes2_ick_ops);

static struct clk mcspi2_fck;

static struct clk_hw_omap mcspi2_fck_hw = {
	.hw = {
		.clk = &mcspi2_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
	.enable_bit = OMAP3430_EN_MCSPI2_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(mcspi2_fck, fshostusb_fck_parent_names, aes2_ick_ops);

static struct clk mcspi2_ick;

static struct clk_hw_omap mcspi2_ick_hw = {
	.hw = {
		.clk = &mcspi2_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_MCSPI2_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(mcspi2_ick, aes2_ick_parent_names, aes2_ick_ops);

static struct clk mcspi3_fck;

static struct clk_hw_omap mcspi3_fck_hw = {
	.hw = {
		.clk = &mcspi3_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
	.enable_bit = OMAP3430_EN_MCSPI3_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(mcspi3_fck, fshostusb_fck_parent_names, aes2_ick_ops);

static struct clk mcspi3_ick;

static struct clk_hw_omap mcspi3_ick_hw = {
	.hw = {
		.clk = &mcspi3_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_MCSPI3_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(mcspi3_ick, aes2_ick_parent_names, aes2_ick_ops);

static struct clk mcspi4_fck;

static struct clk_hw_omap mcspi4_fck_hw = {
	.hw = {
		.clk = &mcspi4_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
	.enable_bit = OMAP3430_EN_MCSPI4_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(mcspi4_fck, fshostusb_fck_parent_names, aes2_ick_ops);

static struct clk mcspi4_ick;

static struct clk_hw_omap mcspi4_ick_hw = {
	.hw = {
		.clk = &mcspi4_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_MCSPI4_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(mcspi4_ick, aes2_ick_parent_names, aes2_ick_ops);

static struct clk mmchs1_fck;

static struct clk_hw_omap mmchs1_fck_hw = {
	.hw = {
		.clk = &mmchs1_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
	.enable_bit = OMAP3430_EN_MMC1_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(mmchs1_fck, csi2_96m_fck_parent_names, aes2_ick_ops);

static struct clk mmchs1_ick;

static struct clk_hw_omap mmchs1_ick_hw = {
	.hw = {
		.clk = &mmchs1_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_MMC1_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(mmchs1_ick, aes2_ick_parent_names, aes2_ick_ops);

static struct clk mmchs2_fck;

static struct clk_hw_omap mmchs2_fck_hw = {
	.hw = {
		.clk = &mmchs2_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
	.enable_bit = OMAP3430_EN_MMC2_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(mmchs2_fck, csi2_96m_fck_parent_names, aes2_ick_ops);

static struct clk mmchs2_ick;

static struct clk_hw_omap mmchs2_ick_hw = {
	.hw = {
		.clk = &mmchs2_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_MMC2_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(mmchs2_ick, aes2_ick_parent_names, aes2_ick_ops);

static struct clk mmchs3_fck;

static struct clk_hw_omap mmchs3_fck_hw = {
	.hw = {
		.clk = &mmchs3_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
	.enable_bit = OMAP3430ES2_EN_MMC3_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(mmchs3_fck, csi2_96m_fck_parent_names, aes2_ick_ops);

static struct clk mmchs3_ick;

static struct clk_hw_omap mmchs3_ick_hw = {
	.hw = {
		.clk = &mmchs3_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430ES2_EN_MMC3_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(mmchs3_ick, aes2_ick_parent_names, aes2_ick_ops);

static struct clk modem_fck;

static struct clk_hw_omap modem_fck_hw = {
	.hw = {
		.clk = &modem_fck,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
	.enable_bit = OMAP3430_EN_MODEM_SHIFT,
	.clkdm_name = "d2d_clkdm",
};

DEFINE_STRUCT_CLK(modem_fck, cpefuse_fck_parent_names, aes2_ick_ops);

static struct clk mspro_fck;

static struct clk_hw_omap mspro_fck_hw = {
	.hw = {
		.clk = &mspro_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
	.enable_bit = OMAP3430_EN_MSPRO_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(mspro_fck, csi2_96m_fck_parent_names, aes2_ick_ops);

static struct clk mspro_ick;

static struct clk_hw_omap mspro_ick_hw = {
	.hw = {
		.clk = &mspro_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_MSPRO_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(mspro_ick, aes2_ick_parent_names, aes2_ick_ops);

static struct clk omap_192m_alwon_fck;

DEFINE_STRUCT_CLK_HW_OMAP(omap_192m_alwon_fck, NULL);
DEFINE_STRUCT_CLK(omap_192m_alwon_fck, omap_96m_alwon_fck_parent_names,
		  core_ck_ops);

static struct clk omap_32ksync_ick;

static struct clk_hw_omap omap_32ksync_ick_hw = {
	.hw = {
		.clk = &omap_32ksync_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
	.enable_bit = OMAP3430_EN_32KSYNC_SHIFT,
	.clkdm_name = "wkup_clkdm",
};

DEFINE_STRUCT_CLK(omap_32ksync_ick, gpio1_ick_parent_names, aes2_ick_ops);

static const struct clksel_rate omap_96m_alwon_fck_rates[] = {
	{ .div = 1, .val = 1, .flags = RATE_IN_36XX },
	{ .div = 2, .val = 2, .flags = RATE_IN_36XX },
	{ .div = 0 }
};

static const struct clksel omap_96m_alwon_fck_clksel[] = {
	{ .parent = &omap_192m_alwon_fck, .rates = omap_96m_alwon_fck_rates },
	{ .parent = NULL }
};

static struct clk omap_96m_alwon_fck_3630;

static const char *omap_96m_alwon_fck_3630_parent_names[] = {
	"omap_192m_alwon_fck",
};

static const struct clk_ops omap_96m_alwon_fck_3630_ops = {
	.set_rate = &omap2_clksel_set_rate,
	.recalc_rate = &omap2_clksel_recalc,
	.round_rate = &omap2_clksel_round_rate,
};

static struct clk_hw_omap omap_96m_alwon_fck_3630_hw = {
	.hw = {
		.clk = &omap_96m_alwon_fck_3630,
	},
	.clksel = omap_96m_alwon_fck_clksel,
	.clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
	.clksel_mask = OMAP3630_CLKSEL_96M_MASK,
};

static struct clk omap_96m_alwon_fck_3630 = {
	.name = "omap_96m_alwon_fck",
	.hw = &omap_96m_alwon_fck_3630_hw.hw,
	.parent_names = omap_96m_alwon_fck_3630_parent_names,
	.num_parents = ARRAY_SIZE(omap_96m_alwon_fck_3630_parent_names),
	.ops = &omap_96m_alwon_fck_3630_ops,
};
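
/*
 * On OMAP3630 the always-on 96 MHz clock is rebuilt as a child of
 * omap_192m_alwon_fck: per omap_96m_alwon_fck_rates above, the
 * CLKSEL_96M field selects /1 or /2, i.e. 192 MHz or 96 MHz.  The
 * struct keeps the name "omap_96m_alwon_fck" so existing lookups keep
 * working; omap3xxx_clk_init() below swaps it in on 3630-class parts.
 */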

static struct clk omapctrl_ick;

static struct clk_hw_omap omapctrl_ick_hw = {
	.hw = {
		.clk = &omapctrl_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_OMAPCTRL_SHIFT,
	.flags = ENABLE_ON_INIT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(omapctrl_ick, aes2_ick_parent_names, aes2_ick_ops);

DEFINE_CLK_DIVIDER(pclk_fck, "emu_src_ck", &emu_src_ck, 0x0,
		   OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
		   OMAP3430_CLKSEL_PCLK_SHIFT, OMAP3430_CLKSEL_PCLK_WIDTH,
		   CLK_DIVIDER_ONE_BASED, NULL);

DEFINE_CLK_DIVIDER(pclkx2_fck, "emu_src_ck", &emu_src_ck, 0x0,
		   OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
		   OMAP3430_CLKSEL_PCLKX2_SHIFT, OMAP3430_CLKSEL_PCLKX2_WIDTH,
		   CLK_DIVIDER_ONE_BASED, NULL);
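
/*
 * CLK_DIVIDER_ONE_BASED means the CLKSEL_PCLK/CLKSEL_PCLKX2 fields
 * hold the divisor itself (1 => /1, 2 => /2, ...) rather than the
 * common clk framework's default zero-based encoding (0 => /1).
 */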

static struct clk per_48m_fck;

DEFINE_STRUCT_CLK_HW_OMAP(per_48m_fck, "per_clkdm");
DEFINE_STRUCT_CLK(per_48m_fck, core_48m_fck_parent_names, core_l4_ick_ops);

static struct clk security_l3_ick;

DEFINE_STRUCT_CLK_HW_OMAP(security_l3_ick, NULL);
DEFINE_STRUCT_CLK(security_l3_ick, core_l3_ick_parent_names, core_ck_ops);

static struct clk pka_ick;

static const char *pka_ick_parent_names[] = {
	"security_l3_ick",
};

static struct clk_hw_omap pka_ick_hw = {
	.hw = {
		.clk = &pka_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
	.enable_bit = OMAP3430_EN_PKA_SHIFT,
};

DEFINE_STRUCT_CLK(pka_ick, pka_ick_parent_names, aes1_ick_ops);

DEFINE_CLK_DIVIDER(rm_ick, "l4_ick", &l4_ick, 0x0,
		   OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
		   OMAP3430_CLKSEL_RM_SHIFT, OMAP3430_CLKSEL_RM_WIDTH,
		   CLK_DIVIDER_ONE_BASED, NULL);

static struct clk rng_ick;

static struct clk_hw_omap rng_ick_hw = {
	.hw = {
		.clk = &rng_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
	.enable_bit = OMAP3430_EN_RNG_SHIFT,
};

DEFINE_STRUCT_CLK(rng_ick, aes1_ick_parent_names, aes1_ick_ops);

static struct clk sad2d_ick;

static struct clk_hw_omap sad2d_ick_hw = {
	.hw = {
		.clk = &sad2d_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_SAD2D_SHIFT,
	.clkdm_name = "d2d_clkdm",
};

DEFINE_STRUCT_CLK(sad2d_ick, core_l3_ick_parent_names, aes2_ick_ops);

static struct clk sdrc_ick;

static struct clk_hw_omap sdrc_ick_hw = {
	.hw = {
		.clk = &sdrc_ick,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_SDRC_SHIFT,
	.flags = ENABLE_ON_INIT,
	.clkdm_name = "core_l3_clkdm",
};

DEFINE_STRUCT_CLK(sdrc_ick, ipss_ick_parent_names, aes2_ick_ops);

static const struct clksel_rate sgx_core_rates[] = {
	{ .div = 2, .val = 5, .flags = RATE_IN_36XX },
	{ .div = 3, .val = 0, .flags = RATE_IN_3XXX },
	{ .div = 4, .val = 1, .flags = RATE_IN_3XXX },
	{ .div = 6, .val = 2, .flags = RATE_IN_3XXX },
	{ .div = 0 }
};

static const struct clksel_rate sgx_96m_rates[] = {
	{ .div = 1, .val = 3, .flags = RATE_IN_3XXX },
	{ .div = 0 }
};

static const struct clksel_rate sgx_192m_rates[] = {
	{ .div = 1, .val = 4, .flags = RATE_IN_36XX },
	{ .div = 0 }
};

static const struct clksel_rate sgx_corex2_rates[] = {
	{ .div = 3, .val = 6, .flags = RATE_IN_36XX },
	{ .div = 5, .val = 7, .flags = RATE_IN_36XX },
	{ .div = 0 }
};

static const struct clksel sgx_clksel[] = {
	{ .parent = &core_ck, .rates = sgx_core_rates },
	{ .parent = &cm_96m_fck, .rates = sgx_96m_rates },
	{ .parent = &omap_192m_alwon_fck, .rates = sgx_192m_rates },
	{ .parent = &corex2_fck, .rates = sgx_corex2_rates },
	{ .parent = NULL },
};

static const char *sgx_fck_parent_names[] = {
	"core_ck", "cm_96m_fck", "omap_192m_alwon_fck", "corex2_fck",
};

static struct clk sgx_fck;

static const struct clk_ops sgx_fck_ops = {
	.init = &omap2_init_clk_clkdm,
	.enable = &omap2_dflt_clk_enable,
	.disable = &omap2_dflt_clk_disable,
	.is_enabled = &omap2_dflt_clk_is_enabled,
	.recalc_rate = &omap2_clksel_recalc,
	.set_rate = &omap2_clksel_set_rate,
	.round_rate = &omap2_clksel_round_rate,
	.get_parent = &omap2_clksel_find_parent_index,
	.set_parent = &omap2_clksel_set_parent,
};

DEFINE_CLK_OMAP_MUX_GATE(sgx_fck, "sgx_clkdm", sgx_clksel,
			 OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_CLKSEL),
			 OMAP3430ES2_CLKSEL_SGX_MASK,
			 OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_FCLKEN),
			 OMAP3430ES2_CM_FCLKEN_SGX_EN_SGX_SHIFT,
			 &clkhwops_wait, sgx_fck_parent_names, sgx_fck_ops);
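
/*
 * Combining sgx_clksel with the rate tables above: 34xx parts can run
 * the SGX block at core_ck/3, /4 or /6, or directly off cm_96m_fck;
 * 36xx additionally allows core_ck/2, omap_192m_alwon_fck, and
 * corex2_fck/3 or /5.  The .val fields are the raw encodings written
 * to the CLKSEL_SGX register field.
 */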

static struct clk sgx_ick;

static struct clk_hw_omap sgx_ick_hw = {
	.hw = {
		.clk = &sgx_ick,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_ICLKEN),
	.enable_bit = OMAP3430ES2_CM_ICLKEN_SGX_EN_SGX_SHIFT,
	.clkdm_name = "sgx_clkdm",
};

DEFINE_STRUCT_CLK(sgx_ick, core_l3_ick_parent_names, aes2_ick_ops);

static struct clk sha11_ick;

static struct clk_hw_omap sha11_ick_hw = {
	.hw = {
		.clk = &sha11_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
	.enable_bit = OMAP3430_EN_SHA11_SHIFT,
};

DEFINE_STRUCT_CLK(sha11_ick, aes1_ick_parent_names, aes1_ick_ops);

static struct clk sha12_ick;

static struct clk_hw_omap sha12_ick_hw = {
	.hw = {
		.clk = &sha12_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_SHA12_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(sha12_ick, aes2_ick_parent_names, aes2_ick_ops);

static struct clk sr1_fck;

static struct clk_hw_omap sr1_fck_hw = {
	.hw = {
		.clk = &sr1_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430_EN_SR1_SHIFT,
	.clkdm_name = "wkup_clkdm",
};

DEFINE_STRUCT_CLK(sr1_fck, cpefuse_fck_parent_names, aes2_ick_ops);

static struct clk sr2_fck;

static struct clk_hw_omap sr2_fck_hw = {
	.hw = {
		.clk = &sr2_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430_EN_SR2_SHIFT,
	.clkdm_name = "wkup_clkdm",
};

DEFINE_STRUCT_CLK(sr2_fck, cpefuse_fck_parent_names, aes2_ick_ops);

static struct clk sr_l4_ick;

DEFINE_STRUCT_CLK_HW_OMAP(sr_l4_ick, "core_l4_clkdm");
DEFINE_STRUCT_CLK(sr_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);

static struct clk ssi_l4_ick;

DEFINE_STRUCT_CLK_HW_OMAP(ssi_l4_ick, "core_l4_clkdm");
DEFINE_STRUCT_CLK(ssi_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);

static struct clk ssi_ick_3430es1;

static const char *ssi_ick_3430es1_parent_names[] = {
	"ssi_l4_ick",
};

static struct clk_hw_omap ssi_ick_3430es1_hw = {
	.hw = {
		.clk = &ssi_ick_3430es1,
	},
	.ops = &clkhwops_iclk,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_SSI_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(ssi_ick_3430es1, ssi_ick_3430es1_parent_names, aes2_ick_ops);

static struct clk ssi_ick_3430es2;

static struct clk_hw_omap ssi_ick_3430es2_hw = {
	.hw = {
		.clk = &ssi_ick_3430es2,
	},
	.ops = &clkhwops_omap3430es2_iclk_ssi_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_SSI_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(ssi_ick_3430es2, ssi_ick_3430es1_parent_names, aes2_ick_ops);

static const struct clksel_rate ssi_ssr_corex2_rates[] = {
	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
	{ .div = 2, .val = 2, .flags = RATE_IN_3XXX },
	{ .div = 3, .val = 3, .flags = RATE_IN_3XXX },
	{ .div = 4, .val = 4, .flags = RATE_IN_3XXX },
	{ .div = 6, .val = 6, .flags = RATE_IN_3XXX },
	{ .div = 8, .val = 8, .flags = RATE_IN_3XXX },
	{ .div = 0 }
};

static const struct clksel ssi_ssr_clksel[] = {
	{ .parent = &corex2_fck, .rates = ssi_ssr_corex2_rates },
	{ .parent = NULL },
};

static const char *ssi_ssr_fck_3430es1_parent_names[] = {
	"corex2_fck",
};

static const struct clk_ops ssi_ssr_fck_3430es1_ops = {
	.init = &omap2_init_clk_clkdm,
	.enable = &omap2_dflt_clk_enable,
	.disable = &omap2_dflt_clk_disable,
	.is_enabled = &omap2_dflt_clk_is_enabled,
	.recalc_rate = &omap2_clksel_recalc,
	.set_rate = &omap2_clksel_set_rate,
	.round_rate = &omap2_clksel_round_rate,
};

DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_fck_3430es1, "core_l4_clkdm",
			 ssi_ssr_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
			 OMAP3430_CLKSEL_SSI_MASK,
			 OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
			 OMAP3430_EN_SSI_SHIFT,
			 NULL, ssi_ssr_fck_3430es1_parent_names,
			 ssi_ssr_fck_3430es1_ops);

DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_fck_3430es2, "core_l4_clkdm",
			 ssi_ssr_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
			 OMAP3430_CLKSEL_SSI_MASK,
			 OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
			 OMAP3430_EN_SSI_SHIFT,
			 NULL, ssi_ssr_fck_3430es1_parent_names,
			 ssi_ssr_fck_3430es1_ops);

DEFINE_CLK_FIXED_FACTOR(ssi_sst_fck_3430es1, "ssi_ssr_fck_3430es1",
			&ssi_ssr_fck_3430es1, 0x0, 1, 2);

DEFINE_CLK_FIXED_FACTOR(ssi_sst_fck_3430es2, "ssi_ssr_fck_3430es2",
			&ssi_ssr_fck_3430es2, 0x0, 1, 2);
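
/*
 * The SSI transmitter clock is always half the receiver clock, so
 * ssi_sst_fck_* is modelled as a fixed-factor child (mult 1, div 2) of
 * the matching ssi_ssr_fck_* mux/gate; no register is touched when it
 * is enabled.
 */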

static struct clk sys_clkout1;

static const char *sys_clkout1_parent_names[] = {
	"osc_sys_ck",
};

static struct clk_hw_omap sys_clkout1_hw = {
	.hw = {
		.clk = &sys_clkout1,
	},
	.enable_reg = OMAP3430_PRM_CLKOUT_CTRL,
	.enable_bit = OMAP3430_CLKOUT_EN_SHIFT,
};

DEFINE_STRUCT_CLK(sys_clkout1, sys_clkout1_parent_names, aes1_ick_ops);

DEFINE_CLK_DIVIDER(sys_clkout2, "clkout2_src_ck", &clkout2_src_ck, 0x0,
		   OMAP3430_CM_CLKOUT_CTRL, OMAP3430_CLKOUT2_DIV_SHIFT,
		   OMAP3430_CLKOUT2_DIV_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
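
/*
 * Unlike the one-based EMU dividers elsewhere in this file, sys_clkout2
 * uses CLK_DIVIDER_POWER_OF_TWO: a field value of n selects a divider
 * of 2^n, giving clkout2_src_ck / {1, 2, 4, 8, ...}.
 */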

DEFINE_CLK_MUX(traceclk_src_fck, emu_src_ck_parent_names, NULL, 0x0,
	       OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
	       OMAP3430_TRACE_MUX_CTRL_SHIFT, OMAP3430_TRACE_MUX_CTRL_WIDTH,
	       0x0, NULL);

DEFINE_CLK_DIVIDER(traceclk_fck, "traceclk_src_fck", &traceclk_src_fck, 0x0,
		   OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
		   OMAP3430_CLKSEL_TRACECLK_SHIFT,
		   OMAP3430_CLKSEL_TRACECLK_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);

static struct clk ts_fck;

static struct clk_hw_omap ts_fck_hw = {
	.hw = {
		.clk = &ts_fck,
	},
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
	.enable_bit = OMAP3430ES2_EN_TS_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(ts_fck, wkup_32k_fck_parent_names, aes2_ick_ops);

static struct clk uart1_fck;

static struct clk_hw_omap uart1_fck_hw = {
	.hw = {
		.clk = &uart1_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
	.enable_bit = OMAP3430_EN_UART1_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(uart1_fck, fshostusb_fck_parent_names, aes2_ick_ops);

static struct clk uart1_ick;

static struct clk_hw_omap uart1_ick_hw = {
	.hw = {
		.clk = &uart1_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_UART1_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(uart1_ick, aes2_ick_parent_names, aes2_ick_ops);

static struct clk uart2_fck;

static struct clk_hw_omap uart2_fck_hw = {
	.hw = {
		.clk = &uart2_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
	.enable_bit = OMAP3430_EN_UART2_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(uart2_fck, fshostusb_fck_parent_names, aes2_ick_ops);

static struct clk uart2_ick;

static struct clk_hw_omap uart2_ick_hw = {
	.hw = {
		.clk = &uart2_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = OMAP3430_EN_UART2_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(uart2_ick, aes2_ick_parent_names, aes2_ick_ops);

static struct clk uart3_fck;

static const char *uart3_fck_parent_names[] = {
	"per_48m_fck",
};

static struct clk_hw_omap uart3_fck_hw = {
	.hw = {
		.clk = &uart3_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430_EN_UART3_SHIFT,
	.clkdm_name = "per_clkdm",
};

DEFINE_STRUCT_CLK(uart3_fck, uart3_fck_parent_names, aes2_ick_ops);

static struct clk uart3_ick;

static struct clk_hw_omap uart3_ick_hw = {
	.hw = {
		.clk = &uart3_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
	.enable_bit = OMAP3430_EN_UART3_SHIFT,
	.clkdm_name = "per_clkdm",
};

DEFINE_STRUCT_CLK(uart3_ick, gpio2_ick_parent_names, aes2_ick_ops);

static struct clk uart4_fck;

static struct clk_hw_omap uart4_fck_hw = {
	.hw = {
		.clk = &uart4_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
	.enable_bit = OMAP3630_EN_UART4_SHIFT,
	.clkdm_name = "per_clkdm",
};

DEFINE_STRUCT_CLK(uart4_fck, uart3_fck_parent_names, aes2_ick_ops);

static struct clk uart4_fck_am35xx;

static struct clk_hw_omap uart4_fck_am35xx_hw = {
	.hw = {
		.clk = &uart4_fck_am35xx,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
	.enable_bit = AM35XX_EN_UART4_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(uart4_fck_am35xx, fshostusb_fck_parent_names, aes2_ick_ops);

static struct clk uart4_ick;

static struct clk_hw_omap uart4_ick_hw = {
	.hw = {
		.clk = &uart4_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
	.enable_bit = OMAP3630_EN_UART4_SHIFT,
	.clkdm_name = "per_clkdm",
};

DEFINE_STRUCT_CLK(uart4_ick, gpio2_ick_parent_names, aes2_ick_ops);

static struct clk uart4_ick_am35xx;

static struct clk_hw_omap uart4_ick_am35xx_hw = {
	.hw = {
		.clk = &uart4_ick_am35xx,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
	.enable_bit = AM35XX_EN_UART4_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(uart4_ick_am35xx, aes2_ick_parent_names, aes2_ick_ops);

static const struct clksel_rate div2_rates[] = {
	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
	{ .div = 2, .val = 2, .flags = RATE_IN_3XXX },
	{ .div = 0 }
};

static const struct clksel usb_l4_clksel[] = {
	{ .parent = &l4_ick, .rates = div2_rates },
	{ .parent = NULL },
};

static const char *usb_l4_ick_parent_names[] = {
	"l4_ick",
};

DEFINE_CLK_OMAP_MUX_GATE(usb_l4_ick, "core_l4_clkdm", usb_l4_clksel,
			 OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
			 OMAP3430ES1_CLKSEL_FSHOSTUSB_MASK,
			 OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
			 OMAP3430ES1_EN_FSHOSTUSB_SHIFT,
			 &clkhwops_iclk_wait, usb_l4_ick_parent_names,
			 ssi_ssr_fck_3430es1_ops);
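
/*
 * usb_l4_ick borrows ssi_ssr_fck_3430es1_ops because it needs the same
 * callback shape (a single-parent clksel divider, l4_ick/1 or /2,
 * combined with a gate), and that ops table contains nothing
 * SSI-specific.
 */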

static struct clk usbhost_120m_fck;

static const char *usbhost_120m_fck_parent_names[] = {
	"dpll5_m2_ck",
};

static struct clk_hw_omap usbhost_120m_fck_hw = {
	.hw = {
		.clk = &usbhost_120m_fck,
	},
	.enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430ES2_EN_USBHOST2_SHIFT,
	.clkdm_name = "usbhost_clkdm",
};

DEFINE_STRUCT_CLK(usbhost_120m_fck, usbhost_120m_fck_parent_names,
		  aes2_ick_ops);

static struct clk usbhost_48m_fck;

static struct clk_hw_omap usbhost_48m_fck_hw = {
	.hw = {
		.clk = &usbhost_48m_fck,
	},
	.ops = &clkhwops_omap3430es2_dss_usbhost_wait,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430ES2_EN_USBHOST1_SHIFT,
	.clkdm_name = "usbhost_clkdm",
};

DEFINE_STRUCT_CLK(usbhost_48m_fck, core_48m_fck_parent_names, aes2_ick_ops);

static struct clk usbhost_ick;

static struct clk_hw_omap usbhost_ick_hw = {
	.hw = {
		.clk = &usbhost_ick,
	},
	.ops = &clkhwops_omap3430es2_iclk_dss_usbhost_wait,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN),
	.enable_bit = OMAP3430ES2_EN_USBHOST_SHIFT,
	.clkdm_name = "usbhost_clkdm",
};

DEFINE_STRUCT_CLK(usbhost_ick, security_l4_ick2_parent_names, aes2_ick_ops);

static struct clk usbtll_fck;

static struct clk_hw_omap usbtll_fck_hw = {
	.hw = {
		.clk = &usbtll_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
	.enable_bit = OMAP3430ES2_EN_USBTLL_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(usbtll_fck, usbhost_120m_fck_parent_names, aes2_ick_ops);

static struct clk usbtll_ick;

static struct clk_hw_omap usbtll_ick_hw = {
	.hw = {
		.clk = &usbtll_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
	.enable_bit = OMAP3430ES2_EN_USBTLL_SHIFT,
	.clkdm_name = "core_l4_clkdm",
};

DEFINE_STRUCT_CLK(usbtll_ick, aes2_ick_parent_names, aes2_ick_ops);

static const struct clksel_rate usim_96m_rates[] = {
	{ .div = 2, .val = 3, .flags = RATE_IN_3XXX },
	{ .div = 4, .val = 4, .flags = RATE_IN_3XXX },
	{ .div = 8, .val = 5, .flags = RATE_IN_3XXX },
	{ .div = 10, .val = 6, .flags = RATE_IN_3XXX },
	{ .div = 0 }
};

static const struct clksel_rate usim_120m_rates[] = {
	{ .div = 4, .val = 7, .flags = RATE_IN_3XXX },
	{ .div = 8, .val = 8, .flags = RATE_IN_3XXX },
	{ .div = 16, .val = 9, .flags = RATE_IN_3XXX },
	{ .div = 20, .val = 10, .flags = RATE_IN_3XXX },
	{ .div = 0 }
};

static const struct clksel usim_clksel[] = {
	{ .parent = &omap_96m_fck, .rates = usim_96m_rates },
	{ .parent = &dpll5_m2_ck, .rates = usim_120m_rates },
	{ .parent = &sys_ck, .rates = div2_rates },
	{ .parent = NULL },
};

static const char *usim_fck_parent_names[] = {
	"omap_96m_fck", "dpll5_m2_ck", "sys_ck",
};

static struct clk usim_fck;

static const struct clk_ops usim_fck_ops = {
	.enable = &omap2_dflt_clk_enable,
	.disable = &omap2_dflt_clk_disable,
	.is_enabled = &omap2_dflt_clk_is_enabled,
	.recalc_rate = &omap2_clksel_recalc,
	.get_parent = &omap2_clksel_find_parent_index,
	.set_parent = &omap2_clksel_set_parent,
};

DEFINE_CLK_OMAP_MUX_GATE(usim_fck, NULL, usim_clksel,
			 OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
			 OMAP3430ES2_CLKSEL_USIMOCP_MASK,
			 OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
			 OMAP3430ES2_EN_USIMOCP_SHIFT, &clkhwops_wait,
			 usim_fck_parent_names, usim_fck_ops);
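
/*
 * Reading usim_clksel together with the rate tables: the USIM
 * functional clock can be 96 MHz / {2,4,8,10}, 120 MHz (dpll5_m2_ck)
 * / {4,8,16,20}, or sys_ck / {1,2}, all selected through the
 * CLKSEL_USIMOCP field in the WKUP module.
 */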

static struct clk usim_ick;

static struct clk_hw_omap usim_ick_hw = {
	.hw = {
		.clk = &usim_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
	.enable_bit = OMAP3430ES2_EN_USIMOCP_SHIFT,
	.clkdm_name = "wkup_clkdm",
};

DEFINE_STRUCT_CLK(usim_ick, gpio1_ick_parent_names, aes2_ick_ops);

static struct clk vpfe_fck;

static const char *vpfe_fck_parent_names[] = {
	"pclk_ck",
};

static struct clk_hw_omap vpfe_fck_hw = {
	.hw = {
		.clk = &vpfe_fck,
	},
	.enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
	.enable_bit = AM35XX_VPFE_FCLK_SHIFT,
};

DEFINE_STRUCT_CLK(vpfe_fck, vpfe_fck_parent_names, aes1_ick_ops);

static struct clk vpfe_ick;

static struct clk_hw_omap vpfe_ick_hw = {
	.hw = {
		.clk = &vpfe_ick,
	},
	.ops = &clkhwops_am35xx_ipss_module_wait,
	.enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
	.enable_bit = AM35XX_VPFE_VBUSP_CLK_SHIFT,
	.clkdm_name = "core_l3_clkdm",
};

DEFINE_STRUCT_CLK(vpfe_ick, emac_ick_parent_names, aes2_ick_ops);

static struct clk wdt1_fck;

DEFINE_STRUCT_CLK_HW_OMAP(wdt1_fck, "wkup_clkdm");
DEFINE_STRUCT_CLK(wdt1_fck, gpt12_fck_parent_names, core_l4_ick_ops);

static struct clk wdt1_ick;

static struct clk_hw_omap wdt1_ick_hw = {
	.hw = {
		.clk = &wdt1_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
	.enable_bit = OMAP3430_EN_WDT1_SHIFT,
	.clkdm_name = "wkup_clkdm",
};

DEFINE_STRUCT_CLK(wdt1_ick, gpio1_ick_parent_names, aes2_ick_ops);

static struct clk wdt2_fck;

static struct clk_hw_omap wdt2_fck_hw = {
	.hw = {
		.clk = &wdt2_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430_EN_WDT2_SHIFT,
	.clkdm_name = "wkup_clkdm",
};

DEFINE_STRUCT_CLK(wdt2_fck, gpio1_dbck_parent_names, aes2_ick_ops);

static struct clk wdt2_ick;

static struct clk_hw_omap wdt2_ick_hw = {
	.hw = {
		.clk = &wdt2_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
	.enable_bit = OMAP3430_EN_WDT2_SHIFT,
	.clkdm_name = "wkup_clkdm",
};

DEFINE_STRUCT_CLK(wdt2_ick, gpio1_ick_parent_names, aes2_ick_ops);

static struct clk wdt3_fck;

static struct clk_hw_omap wdt3_fck_hw = {
	.hw = {
		.clk = &wdt3_fck,
	},
	.ops = &clkhwops_wait,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
	.enable_bit = OMAP3430_EN_WDT3_SHIFT,
	.clkdm_name = "per_clkdm",
};

DEFINE_STRUCT_CLK(wdt3_fck, gpio2_dbck_parent_names, aes2_ick_ops);

static struct clk wdt3_ick;

static struct clk_hw_omap wdt3_ick_hw = {
	.hw = {
		.clk = &wdt3_ick,
	},
	.ops = &clkhwops_iclk_wait,
	.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
	.enable_bit = OMAP3430_EN_WDT3_SHIFT,
	.clkdm_name = "per_clkdm",
};

DEFINE_STRUCT_CLK(wdt3_ick, gpio2_ick_parent_names, aes2_ick_ops);

/*
 * clocks specific to omap3430es1
 */
static struct omap_clk omap3430es1_clks[] = {
	CLK(NULL, "gfx_l3_ck", &gfx_l3_ck),
	CLK(NULL, "gfx_l3_fck", &gfx_l3_fck),
	CLK(NULL, "gfx_l3_ick", &gfx_l3_ick),
	CLK(NULL, "gfx_cg1_ck", &gfx_cg1_ck),
	CLK(NULL, "gfx_cg2_ck", &gfx_cg2_ck),
	CLK(NULL, "d2d_26m_fck", &d2d_26m_fck),
	CLK(NULL, "fshostusb_fck", &fshostusb_fck),
	CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es1),
	CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es1),
	CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es1),
	CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es1),
	CLK(NULL, "fac_ick", &fac_ick),
	CLK(NULL, "ssi_ick", &ssi_ick_3430es1),
	CLK(NULL, "usb_l4_ick", &usb_l4_ick),
	CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es1),
	CLK("omapdss_dss", "ick", &dss_ick_3430es1),
	CLK(NULL, "dss_ick", &dss_ick_3430es1),
};
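
/*
 * Each CLK(dev, con, ptr) entry in these tables becomes a clkdev
 * lookup: a NULL dev_id matches any device, so both clk_get(dev, "ick")
 * on the omapdss device and clk_get(NULL, "dss_ick") resolve to
 * dss_ick_3430es1 on ES1 silicon.
 */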

/*
 * clocks specific to am35xx
 */
static struct omap_clk am35xx_clks[] = {
	CLK(NULL, "ipss_ick", &ipss_ick),
	CLK(NULL, "rmii_ck", &rmii_ck),
	CLK(NULL, "pclk_ck", &pclk_ck),
	CLK(NULL, "emac_ick", &emac_ick),
	CLK(NULL, "emac_fck", &emac_fck),
	CLK("davinci_emac.0", NULL, &emac_ick),
	CLK("davinci_mdio.0", NULL, &emac_fck),
	CLK("vpfe-capture", "master", &vpfe_ick),
	CLK("vpfe-capture", "slave", &vpfe_fck),
	CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_am35xx),
	CLK(NULL, "hsotgusb_fck", &hsotgusb_fck_am35xx),
	CLK(NULL, "hecc_ck", &hecc_ck),
	CLK(NULL, "uart4_ick", &uart4_ick_am35xx),
	CLK(NULL, "uart4_fck", &uart4_fck_am35xx),
};

/*
 * clocks specific to omap36xx
 */
static struct omap_clk omap36xx_clks[] = {
	CLK(NULL, "omap_192m_alwon_fck", &omap_192m_alwon_fck),
	CLK(NULL, "uart4_fck", &uart4_fck),
};

/*
 * clocks common to omap36xx omap34xx
 */
static struct omap_clk omap34xx_omap36xx_clks[] = {
	CLK(NULL, "aes1_ick", &aes1_ick),
	CLK("omap_rng", "ick", &rng_ick),
	CLK("omap3-rom-rng", "ick", &rng_ick),
	CLK(NULL, "sha11_ick", &sha11_ick),
	CLK(NULL, "des1_ick", &des1_ick),
	CLK(NULL, "cam_mclk", &cam_mclk),
	CLK(NULL, "cam_ick", &cam_ick),
	CLK(NULL, "csi2_96m_fck", &csi2_96m_fck),
	CLK(NULL, "security_l3_ick", &security_l3_ick),
	CLK(NULL, "pka_ick", &pka_ick),
	CLK(NULL, "icr_ick", &icr_ick),
	CLK("omap-aes", "ick", &aes2_ick),
	CLK("omap-sham", "ick", &sha12_ick),
	CLK(NULL, "des2_ick", &des2_ick),
	CLK(NULL, "mspro_ick", &mspro_ick),
	CLK(NULL, "mailboxes_ick", &mailboxes_ick),
	CLK(NULL, "ssi_l4_ick", &ssi_l4_ick),
	CLK(NULL, "sr1_fck", &sr1_fck),
	CLK(NULL, "sr2_fck", &sr2_fck),
	CLK(NULL, "sr_l4_ick", &sr_l4_ick),
	CLK(NULL, "security_l4_ick2", &security_l4_ick2),
	CLK(NULL, "wkup_l4_ick", &wkup_l4_ick),
	CLK(NULL, "dpll2_fck", &dpll2_fck),
	CLK(NULL, "iva2_ck", &iva2_ck),
	CLK(NULL, "modem_fck", &modem_fck),
	CLK(NULL, "sad2d_ick", &sad2d_ick),
	CLK(NULL, "mad2d_ick", &mad2d_ick),
	CLK(NULL, "mspro_fck", &mspro_fck),
	CLK(NULL, "dpll2_ck", &dpll2_ck),
	CLK(NULL, "dpll2_m2_ck", &dpll2_m2_ck),
};

/*
 * clocks common to omap36xx and omap3430es2plus
 */
static struct omap_clk omap36xx_omap3430es2plus_clks[] = {
	CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es2),
	CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es2),
	CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es2),
	CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es2),
	CLK(NULL, "ssi_ick", &ssi_ick_3430es2),
	CLK(NULL, "usim_fck", &usim_fck),
	CLK(NULL, "usim_ick", &usim_ick),
};

/*
 * clocks common to am35xx omap36xx and omap3430es2plus
 */
static struct omap_clk omap36xx_am35xx_omap3430es2plus_clks[] = {
	CLK(NULL, "virt_16_8m_ck", &virt_16_8m_ck),
	CLK(NULL, "dpll5_ck", &dpll5_ck),
	CLK(NULL, "dpll5_m2_ck", &dpll5_m2_ck),
	CLK(NULL, "sgx_fck", &sgx_fck),
	CLK(NULL, "sgx_ick", &sgx_ick),
	CLK(NULL, "cpefuse_fck", &cpefuse_fck),
	CLK(NULL, "ts_fck", &ts_fck),
	CLK(NULL, "usbtll_fck", &usbtll_fck),
	CLK(NULL, "usbtll_ick", &usbtll_ick),
	CLK("omap_hsmmc.2", "ick", &mmchs3_ick),
	CLK(NULL, "mmchs3_ick", &mmchs3_ick),
	CLK(NULL, "mmchs3_fck", &mmchs3_fck),
	CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es2),
	CLK("omapdss_dss", "ick", &dss_ick_3430es2),
	CLK(NULL, "dss_ick", &dss_ick_3430es2),
	CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck),
	CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck),
	CLK(NULL, "usbhost_ick", &usbhost_ick),
};

/*
 * common clocks
 */
static struct omap_clk omap3xxx_clks[] = {
	CLK(NULL, "apb_pclk", &dummy_apb_pclk),
	CLK(NULL, "omap_32k_fck", &omap_32k_fck),
	CLK(NULL, "virt_12m_ck", &virt_12m_ck),
	CLK(NULL, "virt_13m_ck", &virt_13m_ck),
	CLK(NULL, "virt_19200000_ck", &virt_19200000_ck),
	CLK(NULL, "virt_26000000_ck", &virt_26000000_ck),
	CLK(NULL, "virt_38_4m_ck", &virt_38_4m_ck),
	CLK(NULL, "osc_sys_ck", &osc_sys_ck),
	CLK("twl", "fck", &osc_sys_ck),
	CLK(NULL, "sys_ck", &sys_ck),
	CLK(NULL, "omap_96m_alwon_fck", &omap_96m_alwon_fck),
	CLK("etb", "emu_core_alwon_ck", &emu_core_alwon_ck),
	CLK(NULL, "sys_altclk", &sys_altclk),
	CLK(NULL, "mcbsp_clks", &mcbsp_clks),
	CLK(NULL, "sys_clkout1", &sys_clkout1),
	CLK(NULL, "dpll1_ck", &dpll1_ck),
	CLK(NULL, "dpll1_x2_ck", &dpll1_x2_ck),
	CLK(NULL, "dpll1_x2m2_ck", &dpll1_x2m2_ck),
	CLK(NULL, "dpll3_ck", &dpll3_ck),
	CLK(NULL, "core_ck", &core_ck),
	CLK(NULL, "dpll3_x2_ck", &dpll3_x2_ck),
	CLK(NULL, "dpll3_m2_ck", &dpll3_m2_ck),
	CLK(NULL, "dpll3_m2x2_ck", &dpll3_m2x2_ck),
	CLK(NULL, "dpll3_m3_ck", &dpll3_m3_ck),
	CLK(NULL, "dpll3_m3x2_ck", &dpll3_m3x2_ck),
	CLK(NULL, "dpll4_ck", &dpll4_ck),
	CLK(NULL, "dpll4_x2_ck", &dpll4_x2_ck),
	CLK(NULL, "omap_96m_fck", &omap_96m_fck),
	CLK(NULL, "cm_96m_fck", &cm_96m_fck),
	CLK(NULL, "omap_54m_fck", &omap_54m_fck),
	CLK(NULL, "omap_48m_fck", &omap_48m_fck),
	CLK(NULL, "omap_12m_fck", &omap_12m_fck),
	CLK(NULL, "dpll4_m2_ck", &dpll4_m2_ck),
	CLK(NULL, "dpll4_m2x2_ck", &dpll4_m2x2_ck),
	CLK(NULL, "dpll4_m3_ck", &dpll4_m3_ck),
	CLK(NULL, "dpll4_m3x2_ck", &dpll4_m3x2_ck),
	CLK(NULL, "dpll4_m4_ck", &dpll4_m4_ck),
	CLK(NULL, "dpll4_m4x2_ck", &dpll4_m4x2_ck),
	CLK(NULL, "dpll4_m5_ck", &dpll4_m5_ck),
	CLK(NULL, "dpll4_m5x2_ck", &dpll4_m5x2_ck),
	CLK(NULL, "dpll4_m6_ck", &dpll4_m6_ck),
	CLK(NULL, "dpll4_m6x2_ck", &dpll4_m6x2_ck),
	CLK("etb", "emu_per_alwon_ck", &emu_per_alwon_ck),
	CLK(NULL, "clkout2_src_ck", &clkout2_src_ck),
	CLK(NULL, "sys_clkout2", &sys_clkout2),
	CLK(NULL, "corex2_fck", &corex2_fck),
	CLK(NULL, "dpll1_fck", &dpll1_fck),
	CLK(NULL, "mpu_ck", &mpu_ck),
	CLK(NULL, "arm_fck", &arm_fck),
	CLK("etb", "emu_mpu_alwon_ck", &emu_mpu_alwon_ck),
	CLK(NULL, "l3_ick", &l3_ick),
	CLK(NULL, "l4_ick", &l4_ick),
	CLK(NULL, "rm_ick", &rm_ick),
	CLK(NULL, "gpt10_fck", &gpt10_fck),
	CLK(NULL, "gpt11_fck", &gpt11_fck),
	CLK(NULL, "core_96m_fck", &core_96m_fck),
	CLK(NULL, "mmchs2_fck", &mmchs2_fck),
	CLK(NULL, "mmchs1_fck", &mmchs1_fck),
	CLK(NULL, "i2c3_fck", &i2c3_fck),
	CLK(NULL, "i2c2_fck", &i2c2_fck),
	CLK(NULL, "i2c1_fck", &i2c1_fck),
	CLK(NULL, "mcbsp5_fck", &mcbsp5_fck),
	CLK(NULL, "mcbsp1_fck", &mcbsp1_fck),
	CLK(NULL, "core_48m_fck", &core_48m_fck),
	CLK(NULL, "mcspi4_fck", &mcspi4_fck),
	CLK(NULL, "mcspi3_fck", &mcspi3_fck),
	CLK(NULL, "mcspi2_fck", &mcspi2_fck),
	CLK(NULL, "mcspi1_fck", &mcspi1_fck),
	CLK(NULL, "uart2_fck", &uart2_fck),
	CLK(NULL, "uart1_fck", &uart1_fck),
	CLK(NULL, "core_12m_fck", &core_12m_fck),
	CLK("omap_hdq.0", "fck", &hdq_fck),
	CLK(NULL, "hdq_fck", &hdq_fck),
	CLK(NULL, "core_l3_ick", &core_l3_ick),
	CLK(NULL, "sdrc_ick", &sdrc_ick),
	CLK(NULL, "gpmc_fck", &gpmc_fck),
	CLK(NULL, "core_l4_ick", &core_l4_ick),
	CLK("omap_hsmmc.1", "ick", &mmchs2_ick),
	CLK("omap_hsmmc.0", "ick", &mmchs1_ick),
	CLK(NULL, "mmchs2_ick", &mmchs2_ick),
	CLK(NULL, "mmchs1_ick", &mmchs1_ick),
	CLK("omap_hdq.0", "ick", &hdq_ick),
	CLK(NULL, "hdq_ick", &hdq_ick),
	CLK("omap2_mcspi.4", "ick", &mcspi4_ick),
	CLK("omap2_mcspi.3", "ick", &mcspi3_ick),
	CLK("omap2_mcspi.2", "ick", &mcspi2_ick),
	CLK("omap2_mcspi.1", "ick", &mcspi1_ick),
	CLK(NULL, "mcspi4_ick", &mcspi4_ick),
	CLK(NULL, "mcspi3_ick", &mcspi3_ick),
	CLK(NULL, "mcspi2_ick", &mcspi2_ick),
	CLK(NULL, "mcspi1_ick", &mcspi1_ick),
	CLK("omap_i2c.3", "ick", &i2c3_ick),
	CLK("omap_i2c.2", "ick", &i2c2_ick),
	CLK("omap_i2c.1", "ick", &i2c1_ick),
	CLK(NULL, "i2c3_ick", &i2c3_ick),
	CLK(NULL, "i2c2_ick", &i2c2_ick),
	CLK(NULL, "i2c1_ick", &i2c1_ick),
	CLK(NULL, "uart2_ick", &uart2_ick),
	CLK(NULL, "uart1_ick", &uart1_ick),
	CLK(NULL, "gpt11_ick", &gpt11_ick),
	CLK(NULL, "gpt10_ick", &gpt10_ick),
	CLK("omap-mcbsp.5", "ick", &mcbsp5_ick),
	CLK("omap-mcbsp.1", "ick", &mcbsp1_ick),
	CLK(NULL, "mcbsp5_ick", &mcbsp5_ick),
	CLK(NULL, "mcbsp1_ick", &mcbsp1_ick),
	CLK(NULL, "omapctrl_ick", &omapctrl_ick),
	CLK(NULL, "dss_tv_fck", &dss_tv_fck),
	CLK(NULL, "dss_96m_fck", &dss_96m_fck),
	CLK(NULL, "dss2_alwon_fck", &dss2_alwon_fck),
	CLK(NULL, "init_60m_fclk", &dummy_ck),
	CLK(NULL, "gpt1_fck", &gpt1_fck),
	CLK(NULL, "aes2_ick", &aes2_ick),
	CLK(NULL, "wkup_32k_fck", &wkup_32k_fck),
	CLK(NULL, "gpio1_dbck", &gpio1_dbck),
	CLK(NULL, "sha12_ick", &sha12_ick),
	CLK(NULL, "wdt2_fck", &wdt2_fck),
	CLK("omap_wdt", "ick", &wdt2_ick),
	CLK(NULL, "wdt2_ick", &wdt2_ick),
	CLK(NULL, "wdt1_ick", &wdt1_ick),
	CLK(NULL, "gpio1_ick", &gpio1_ick),
	CLK(NULL, "omap_32ksync_ick", &omap_32ksync_ick),
	CLK(NULL, "gpt12_ick", &gpt12_ick),
	CLK(NULL, "gpt1_ick", &gpt1_ick),
	CLK(NULL, "per_96m_fck", &per_96m_fck),
	CLK(NULL, "per_48m_fck", &per_48m_fck),
	CLK(NULL, "uart3_fck", &uart3_fck),
	CLK(NULL, "gpt2_fck", &gpt2_fck),
	CLK(NULL, "gpt3_fck", &gpt3_fck),
	CLK(NULL, "gpt4_fck", &gpt4_fck),
	CLK(NULL, "gpt5_fck", &gpt5_fck),
	CLK(NULL, "gpt6_fck", &gpt6_fck),
	CLK(NULL, "gpt7_fck", &gpt7_fck),
	CLK(NULL, "gpt8_fck", &gpt8_fck),
	CLK(NULL, "gpt9_fck", &gpt9_fck),
	CLK(NULL, "per_32k_alwon_fck", &per_32k_alwon_fck),
	CLK(NULL, "gpio6_dbck", &gpio6_dbck),
	CLK(NULL, "gpio5_dbck", &gpio5_dbck),
	CLK(NULL, "gpio4_dbck", &gpio4_dbck),
	CLK(NULL, "gpio3_dbck", &gpio3_dbck),
	CLK(NULL, "gpio2_dbck", &gpio2_dbck),
	CLK(NULL, "wdt3_fck", &wdt3_fck),
	CLK(NULL, "per_l4_ick", &per_l4_ick),
	CLK(NULL, "gpio6_ick", &gpio6_ick),
	CLK(NULL, "gpio5_ick", &gpio5_ick),
	CLK(NULL, "gpio4_ick", &gpio4_ick),
	CLK(NULL, "gpio3_ick", &gpio3_ick),
	CLK(NULL, "gpio2_ick", &gpio2_ick),
	CLK(NULL, "wdt3_ick", &wdt3_ick),
	CLK(NULL, "uart3_ick", &uart3_ick),
	CLK(NULL, "uart4_ick", &uart4_ick),
	CLK(NULL, "gpt9_ick", &gpt9_ick),
	CLK(NULL, "gpt8_ick", &gpt8_ick),
	CLK(NULL, "gpt7_ick", &gpt7_ick),
	CLK(NULL, "gpt6_ick", &gpt6_ick),
	CLK(NULL, "gpt5_ick", &gpt5_ick),
	CLK(NULL, "gpt4_ick", &gpt4_ick),
	CLK(NULL, "gpt3_ick", &gpt3_ick),
	CLK(NULL, "gpt2_ick", &gpt2_ick),
	CLK("omap-mcbsp.2", "ick", &mcbsp2_ick),
	CLK("omap-mcbsp.3", "ick", &mcbsp3_ick),
	CLK("omap-mcbsp.4", "ick", &mcbsp4_ick),
	CLK(NULL, "mcbsp4_ick", &mcbsp4_ick),
	CLK(NULL, "mcbsp3_ick", &mcbsp3_ick),
	CLK(NULL, "mcbsp2_ick", &mcbsp2_ick),
	CLK(NULL, "mcbsp2_fck", &mcbsp2_fck),
	CLK(NULL, "mcbsp3_fck", &mcbsp3_fck),
	CLK(NULL, "mcbsp4_fck", &mcbsp4_fck),
	CLK("etb", "emu_src_ck", &emu_src_ck),
	CLK(NULL, "emu_src_ck", &emu_src_ck),
	CLK(NULL, "pclk_fck", &pclk_fck),
	CLK(NULL, "pclkx2_fck", &pclkx2_fck),
	CLK(NULL, "atclk_fck", &atclk_fck),
	CLK(NULL, "traceclk_src_fck", &traceclk_src_fck),
	CLK(NULL, "traceclk_fck", &traceclk_fck),
	CLK(NULL, "secure_32k_fck", &secure_32k_fck),
	CLK(NULL, "gpt12_fck", &gpt12_fck),
	CLK(NULL, "wdt1_fck", &wdt1_fck),
	CLK(NULL, "timer_32k_ck", &omap_32k_fck),
	CLK(NULL, "timer_sys_ck", &sys_ck),
	CLK(NULL, "cpufreq_ck", &dpll1_ck),
};

static const char *enable_init_clks[] = {
	"sdrc_ick",
	"gpmc_fck",
	"omapctrl_ick",
};
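
/*
 * These clocks are enabled once at init and left on: the SDRAM
 * controller (sdrc_ick), GPMC (gpmc_fck) and the system control module
 * (omapctrl_ick) must stay clocked for as long as the kernel runs.
 */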

int __init omap3xxx_clk_init(void)
{
	if (omap3_has_192mhz_clk())
		omap_96m_alwon_fck = omap_96m_alwon_fck_3630;

	if (cpu_is_omap3630()) {
		dpll3_m3x2_ck = dpll3_m3x2_ck_3630;
		dpll4_m2x2_ck = dpll4_m2x2_ck_3630;
		dpll4_m3x2_ck = dpll4_m3x2_ck_3630;
		dpll4_m4x2_ck = dpll4_m4x2_ck_3630;
		dpll4_m5x2_ck = dpll4_m5x2_ck_3630;
		dpll4_m6x2_ck = dpll4_m6x2_ck_3630;
	}

	/*
	 * XXX This type of dynamic rewriting of the clock tree is
	 * deprecated and should be revised soon.
	 */
	if (cpu_is_omap3630())
		dpll4_dd = dpll4_dd_3630;
	else
		dpll4_dd = dpll4_dd_34xx;


	/*
	 * 3505 must be tested before 3517, since 3517 returns true
	 * for both AM3517 chips and AM3517 family chips, which
	 * includes 3505.  Unfortunately there's no obvious family
	 * test for 3517/3505 :-(
	 */
	if (soc_is_am35xx()) {
		cpu_mask = RATE_IN_34XX;
		omap_clocks_register(am35xx_clks, ARRAY_SIZE(am35xx_clks));
		omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks,
				     ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks));
		omap_clocks_register(omap3xxx_clks, ARRAY_SIZE(omap3xxx_clks));
	} else if (cpu_is_omap3630()) {
		cpu_mask = (RATE_IN_34XX | RATE_IN_36XX);
		omap_clocks_register(omap36xx_clks, ARRAY_SIZE(omap36xx_clks));
		omap_clocks_register(omap36xx_omap3430es2plus_clks,
				     ARRAY_SIZE(omap36xx_omap3430es2plus_clks));
		omap_clocks_register(omap34xx_omap36xx_clks,
				     ARRAY_SIZE(omap34xx_omap36xx_clks));
		omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks,
				     ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks));
		omap_clocks_register(omap3xxx_clks, ARRAY_SIZE(omap3xxx_clks));
	} else if (cpu_is_omap34xx()) {
		if (omap_rev() == OMAP3430_REV_ES1_0) {
			cpu_mask = RATE_IN_3430ES1;
			omap_clocks_register(omap3430es1_clks,
					     ARRAY_SIZE(omap3430es1_clks));
			omap_clocks_register(omap34xx_omap36xx_clks,
					     ARRAY_SIZE(omap34xx_omap36xx_clks));
			omap_clocks_register(omap3xxx_clks,
					     ARRAY_SIZE(omap3xxx_clks));
		} else {
			/*
			 * Assume that anything that we haven't matched yet
			 * has 3430ES2-type clocks.
			 */
			cpu_mask = RATE_IN_3430ES2PLUS;
			omap_clocks_register(omap34xx_omap36xx_clks,
					     ARRAY_SIZE(omap34xx_omap36xx_clks));
			omap_clocks_register(omap36xx_omap3430es2plus_clks,
					     ARRAY_SIZE(omap36xx_omap3430es2plus_clks));
			omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks,
					     ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks));
			omap_clocks_register(omap3xxx_clks,
					     ARRAY_SIZE(omap3xxx_clks));
		}
	} else {
		WARN(1, "clock: could not identify OMAP3 variant\n");
	}
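
	/*
	 * Whichever branch ran above, cpu_mask now filters the
	 * clksel_rate tables in this file: only entries whose .flags
	 * intersect cpu_mask (RATE_IN_3XXX, RATE_IN_36XX, ...) are
	 * treated as valid divider settings on the running SoC.
	 */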

	omap2_clk_disable_autoidle_all();

	omap2_clk_enable_init_clocks(enable_init_clks,
				     ARRAY_SIZE(enable_init_clks));

	pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
		(clk_get_rate(&osc_sys_ck) / 1000000),
		(clk_get_rate(&osc_sys_ck) / 100000) % 10,
		(clk_get_rate(&core_ck) / 1000000),
		(clk_get_rate(&arm_fck) / 1000000));

	/*
	 * Lock DPLL5 -- here only until other device init code can
	 * handle this
	 */
	if (omap_rev() >= OMAP3430_REV_ES2_0)
		omap3_clk_lock_dpll5();

	/* Avoid sleeping during omap3_core_dpll_m2_set_rate() */
	sdrc_ick_p = clk_get(NULL, "sdrc_ick");
	arm_fck_p = clk_get(NULL, "arm_fck");

	return 0;
}
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index 4ae4ccebced2..6124db5c37ae 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -23,7 +23,6 @@
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/bitops.h>
-#include <linux/clk-private.h>
 #include <asm/cpu.h>
 
 #include <trace/events/power.h>
@@ -633,21 +632,6 @@ const struct clk_hw_omap_ops clkhwops_wait = {
 };
 
 /**
- * omap_clocks_register - register an array of omap_clk
- * @ocs: pointer to an array of omap_clk to register
- */
-void __init omap_clocks_register(struct omap_clk oclks[], int cnt)
-{
-	struct omap_clk *c;
-
-	for (c = oclks; c < oclks + cnt; c++) {
-		clkdev_add(&c->lk);
-		if (!__clk_init(NULL, c->lk.clk))
-			omap2_init_clk_hw_omap_clocks(c->lk.clk);
-	}
-}
-
-/**
  * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument
  * @mpurate_ck_name: clk name of the clock to change rate
  *
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index 1cf9dd85248a..a56742f96000 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -40,23 +40,29 @@ struct omap_clk {
40struct clockdomain; 40struct clockdomain;
41 41
42#define DEFINE_STRUCT_CLK(_name, _parent_array_name, _clkops_name) \ 42#define DEFINE_STRUCT_CLK(_name, _parent_array_name, _clkops_name) \
43 static struct clk _name = { \ 43 static struct clk_core _name##_core = { \
44 .name = #_name, \ 44 .name = #_name, \
45 .hw = &_name##_hw.hw, \ 45 .hw = &_name##_hw.hw, \
46 .parent_names = _parent_array_name, \ 46 .parent_names = _parent_array_name, \
47 .num_parents = ARRAY_SIZE(_parent_array_name), \ 47 .num_parents = ARRAY_SIZE(_parent_array_name), \
48 .ops = &_clkops_name, \ 48 .ops = &_clkops_name, \
49 }; \
50 static struct clk _name = { \
51 .core = &_name##_core, \
49 }; 52 };
50 53
51#define DEFINE_STRUCT_CLK_FLAGS(_name, _parent_array_name, \ 54#define DEFINE_STRUCT_CLK_FLAGS(_name, _parent_array_name, \
52 _clkops_name, _flags) \ 55 _clkops_name, _flags) \
53 static struct clk _name = { \ 56 static struct clk_core _name##_core = { \
54 .name = #_name, \ 57 .name = #_name, \
55 .hw = &_name##_hw.hw, \ 58 .hw = &_name##_hw.hw, \
56 .parent_names = _parent_array_name, \ 59 .parent_names = _parent_array_name, \
57 .num_parents = ARRAY_SIZE(_parent_array_name), \ 60 .num_parents = ARRAY_SIZE(_parent_array_name), \
58 .ops = &_clkops_name, \ 61 .ops = &_clkops_name, \
59 .flags = _flags, \ 62 .flags = _flags, \
63 }; \
64 static struct clk _name = { \
65 .core = &_name##_core, \
60 }; 66 };
61 67
62#define DEFINE_STRUCT_CLK_HW_OMAP(_name, _clkdm_name) \ 68#define DEFINE_STRUCT_CLK_HW_OMAP(_name, _clkdm_name) \
@@ -238,7 +244,6 @@ struct ti_clk_features {
238extern struct ti_clk_features ti_clk_features; 244extern struct ti_clk_features ti_clk_features;
239 245
240extern const struct clkops clkops_omap2_dflt_wait; 246extern const struct clkops clkops_omap2_dflt_wait;
241extern const struct clkops clkops_dummy;
242extern const struct clkops clkops_omap2_dflt; 247extern const struct clkops clkops_omap2_dflt;
243 248
244extern struct clk_functions omap2_clk_functions; 249extern struct clk_functions omap2_clk_functions;
@@ -247,7 +252,6 @@ extern const struct clksel_rate gpt_32k_rates[];
247extern const struct clksel_rate gpt_sys_rates[]; 252extern const struct clksel_rate gpt_sys_rates[];
248extern const struct clksel_rate gfx_l3_rates[]; 253extern const struct clksel_rate gfx_l3_rates[];
249extern const struct clksel_rate dsp_ick_rates[]; 254extern const struct clksel_rate dsp_ick_rates[];
250extern struct clk dummy_ck;
251 255
252extern const struct clk_hw_omap_ops clkhwops_iclk_wait; 256extern const struct clk_hw_omap_ops clkhwops_iclk_wait;
253extern const struct clk_hw_omap_ops clkhwops_wait; 257extern const struct clk_hw_omap_ops clkhwops_wait;
@@ -272,7 +276,5 @@ extern void __iomem *clk_memmaps[];
272extern int omap2_clkops_enable_clkdm(struct clk_hw *hw); 276extern int omap2_clkops_enable_clkdm(struct clk_hw *hw);
273extern void omap2_clkops_disable_clkdm(struct clk_hw *hw); 277extern void omap2_clkops_disable_clkdm(struct clk_hw *hw);
274 278
275extern void omap_clocks_register(struct omap_clk *oclks, int cnt);
276
277void __init ti_clk_init_features(void); 279void __init ti_clk_init_features(void);
278#endif 280#endif
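Note on the clock.h change: with linux/clk-private.h gone, the static clock data moves into struct clk_core and struct clk becomes a thin handle. As a sketch (clock name, parent array and ops are hypothetical), DEFINE_STRUCT_CLK(foo_ck, foo_parents, foo_ops) now expands to roughly:

	static struct clk_core foo_ck_core = {
		.name		= "foo_ck",
		.hw		= &foo_ck_hw.hw,
		.parent_names	= foo_parents,
		.num_parents	= ARRAY_SIZE(foo_parents),
		.ops		= &foo_ops,
	};
	static struct clk foo_ck = {
		.core		= &foo_ck_core,
	};

so code that takes the address of foo_ck keeps working while the common clock framework owns the core data.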
diff --git a/arch/arm/mach-omap2/clock_common_data.c b/arch/arm/mach-omap2/clock_common_data.c
index ef4d21bfb964..61b60dfb14ce 100644
--- a/arch/arm/mach-omap2/clock_common_data.c
+++ b/arch/arm/mach-omap2/clock_common_data.c
@@ -16,7 +16,6 @@
16 * OMAP3xxx clock definition files. 16 * OMAP3xxx clock definition files.
17 */ 17 */
18 18
19#include <linux/clk-private.h>
20#include "clock.h" 19#include "clock.h"
21 20
22/* clksel_rate data common to 24xx/343x */ 21/* clksel_rate data common to 24xx/343x */
@@ -114,13 +113,3 @@ const struct clksel_rate div31_1to31_rates[] = {
114 { .div = 31, .val = 31, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, 113 { .div = 31, .val = 31, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
115 { .div = 0 }, 114 { .div = 0 },
116}; 115};
117
118/* Clocks shared between various OMAP SoCs */
119
120static struct clk_ops dummy_ck_ops = {};
121
122struct clk dummy_ck = {
123 .name = "dummy_clk",
124 .ops = &dummy_ck_ops,
125 .flags = CLK_IS_BASIC,
126};
diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c
index c2da2a0fe5ad..44e57ec225d4 100644
--- a/arch/arm/mach-omap2/dpll3xxx.c
+++ b/arch/arm/mach-omap2/dpll3xxx.c
@@ -410,7 +410,7 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw)
410 struct clk_hw_omap *clk = to_clk_hw_omap(hw); 410 struct clk_hw_omap *clk = to_clk_hw_omap(hw);
411 int r; 411 int r;
412 struct dpll_data *dd; 412 struct dpll_data *dd;
413 struct clk *parent; 413 struct clk_hw *parent;
414 414
415 dd = clk->dpll_data; 415 dd = clk->dpll_data;
416 if (!dd) 416 if (!dd)
@@ -427,13 +427,13 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw)
427 } 427 }
428 } 428 }
429 429
430 parent = __clk_get_parent(hw->clk); 430 parent = __clk_get_hw(__clk_get_parent(hw->clk));
431 431
432 if (__clk_get_rate(hw->clk) == __clk_get_rate(dd->clk_bypass)) { 432 if (__clk_get_rate(hw->clk) == __clk_get_rate(dd->clk_bypass)) {
433 WARN_ON(parent != dd->clk_bypass); 433 WARN_ON(parent != __clk_get_hw(dd->clk_bypass));
434 r = _omap3_noncore_dpll_bypass(clk); 434 r = _omap3_noncore_dpll_bypass(clk);
435 } else { 435 } else {
436 WARN_ON(parent != dd->clk_ref); 436 WARN_ON(parent != __clk_get_hw(dd->clk_ref));
437 r = _omap3_noncore_dpll_lock(clk); 437 r = _omap3_noncore_dpll_lock(clk);
438 } 438 }
439 439
@@ -473,6 +473,8 @@ void omap3_noncore_dpll_disable(struct clk_hw *hw)
473 * in failure. 473 * in failure.
474 */ 474 */
475long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate, 475long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate,
476 unsigned long min_rate,
477 unsigned long max_rate,
476 unsigned long *best_parent_rate, 478 unsigned long *best_parent_rate,
477 struct clk_hw **best_parent_clk) 479 struct clk_hw **best_parent_clk)
478{ 480{
@@ -549,7 +551,8 @@ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
549 if (!dd) 551 if (!dd)
550 return -EINVAL; 552 return -EINVAL;
551 553
552 if (__clk_get_parent(hw->clk) != dd->clk_ref) 554 if (__clk_get_hw(__clk_get_parent(hw->clk)) !=
555 __clk_get_hw(dd->clk_ref))
553 return -EINVAL; 556 return -EINVAL;
554 557
555 if (dd->last_rounded_rate == 0) 558 if (dd->last_rounded_rate == 0)
diff --git a/arch/arm/mach-omap2/dpll44xx.c b/arch/arm/mach-omap2/dpll44xx.c
index fc712240e5fd..f231be05b9a6 100644
--- a/arch/arm/mach-omap2/dpll44xx.c
+++ b/arch/arm/mach-omap2/dpll44xx.c
@@ -202,6 +202,8 @@ out:
202 * in failure. 202 * in failure.
203 */ 203 */
204long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate, 204long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate,
205 unsigned long min_rate,
206 unsigned long max_rate,
205 unsigned long *best_parent_rate, 207 unsigned long *best_parent_rate,
206 struct clk_hw **best_parent_clk) 208 struct clk_hw **best_parent_clk)
207{ 209{
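Both DPLL hunks above adapt to the same clk framework change: the determine_rate hook gains min_rate/max_rate parameters so rate constraints can propagate through the clock tree. Per the diff, the callback signature the OMAP implementations now have to match is:

	long (*determine_rate)(struct clk_hw *hw, unsigned long rate,
			       unsigned long min_rate,
			       unsigned long max_rate,
			       unsigned long *best_parent_rate,
			       struct clk_hw **best_parent_hw);

Implementations are expected to keep the rate they return within [min_rate, max_rate].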
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index e60780f05374..c4871c55bd8b 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -461,7 +461,17 @@ void __init omap3_init_early(void)
461 omap3xxx_clockdomains_init(); 461 omap3xxx_clockdomains_init();
462 omap3xxx_hwmod_init(); 462 omap3xxx_hwmod_init();
463 omap_hwmod_init_postsetup(); 463 omap_hwmod_init_postsetup();
464 omap_clk_soc_init = omap3xxx_clk_init; 464 if (!of_have_populated_dt()) {
465 omap3_prcm_legacy_iomaps_init();
466 if (soc_is_am35xx())
467 omap_clk_soc_init = am35xx_clk_legacy_init;
468 else if (cpu_is_omap3630())
469 omap_clk_soc_init = omap36xx_clk_legacy_init;
470 else if (omap_rev() == OMAP3430_REV_ES1_0)
471 omap_clk_soc_init = omap3430es1_clk_legacy_init;
472 else
473 omap_clk_soc_init = omap3430_clk_legacy_init;
474 }
465} 475}
466 476
467void __init omap3430_init_early(void) 477void __init omap3430_init_early(void)
@@ -753,15 +763,17 @@ int __init omap_clk_init(void)
753 763
754 ti_clk_init_features(); 764 ti_clk_init_features();
755 765
756 ret = of_prcm_init(); 766 if (of_have_populated_dt()) {
757 if (ret) 767 ret = of_prcm_init();
758 return ret; 768 if (ret)
769 return ret;
759 770
760 of_clk_init(NULL); 771 of_clk_init(NULL);
761 772
762 ti_dt_clk_init_retry_clks(); 773 ti_dt_clk_init_retry_clks();
763 774
764 ti_dt_clockdomains_setup(); 775 ti_dt_clockdomains_setup();
776 }
765 777
766 ret = omap_clk_soc_init(); 778 ret = omap_clk_soc_init();
767 779
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 2418bdf28ca2..cee0fe1ee6ff 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -242,7 +242,7 @@ static int __init omap4_sar_ram_init(void)
242} 242}
243omap_early_initcall(omap4_sar_ram_init); 243omap_early_initcall(omap4_sar_ram_init);
244 244
245static struct of_device_id gic_match[] = { 245static const struct of_device_id gic_match[] = {
246 { .compatible = "arm,cortex-a9-gic", }, 246 { .compatible = "arm,cortex-a9-gic", },
247 { .compatible = "arm,cortex-a15-gic", }, 247 { .compatible = "arm,cortex-a15-gic", },
248 { }, 248 { },
diff --git a/arch/arm/mach-omap2/prm.h b/arch/arm/mach-omap2/prm.h
index 77752e49d8d4..b9061a6a2db8 100644
--- a/arch/arm/mach-omap2/prm.h
+++ b/arch/arm/mach-omap2/prm.h
@@ -20,6 +20,7 @@ extern void __iomem *prm_base;
20extern u16 prm_features; 20extern u16 prm_features;
21extern void omap2_set_globals_prm(void __iomem *prm); 21extern void omap2_set_globals_prm(void __iomem *prm);
22int of_prcm_init(void); 22int of_prcm_init(void);
23void omap3_prcm_legacy_iomaps_init(void);
23# endif 24# endif
24 25
25/* 26/*
diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c
index c5e00c6714b1..5713bbdf83bc 100644
--- a/arch/arm/mach-omap2/prm3xxx.c
+++ b/arch/arm/mach-omap2/prm3xxx.c
@@ -674,7 +674,7 @@ int __init omap3xxx_prm_init(void)
674 return prm_register(&omap3xxx_prm_ll_data); 674 return prm_register(&omap3xxx_prm_ll_data);
675} 675}
676 676
677static struct of_device_id omap3_prm_dt_match_table[] = { 677static const struct of_device_id omap3_prm_dt_match_table[] = {
678 { .compatible = "ti,omap3-prm" }, 678 { .compatible = "ti,omap3-prm" },
679 { } 679 { }
680}; 680};
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index 408c64efb807..a08a617a6c11 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -712,7 +712,7 @@ int __init omap44xx_prm_init(void)
712 return prm_register(&omap44xx_prm_ll_data); 712 return prm_register(&omap44xx_prm_ll_data);
713} 713}
714 714
715static struct of_device_id omap_prm_dt_match_table[] = { 715static const struct of_device_id omap_prm_dt_match_table[] = {
716 { .compatible = "ti,omap4-prm" }, 716 { .compatible = "ti,omap4-prm" },
717 { .compatible = "ti,omap5-prm" }, 717 { .compatible = "ti,omap5-prm" },
718 { .compatible = "ti,dra7-prm" }, 718 { .compatible = "ti,dra7-prm" },
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index 264b5e29404d..bfaa7ba595cc 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -35,6 +35,8 @@
35#include "prm44xx.h" 35#include "prm44xx.h"
36#include "common.h" 36#include "common.h"
37#include "clock.h" 37#include "clock.h"
38#include "cm.h"
39#include "control.h"
38 40
39/* 41/*
40 * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs 42 * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs
@@ -641,6 +643,15 @@ int __init of_prcm_init(void)
641 return 0; 643 return 0;
642} 644}
643 645
646void __init omap3_prcm_legacy_iomaps_init(void)
647{
648 ti_clk_ll_ops = &omap_clk_ll_ops;
649
650 clk_memmaps[TI_CLKM_CM] = cm_base + OMAP3430_IVA2_MOD;
651 clk_memmaps[TI_CLKM_PRM] = prm_base + OMAP3430_IVA2_MOD;
652 clk_memmaps[TI_CLKM_SCRM] = omap_ctrl_base_get();
653}
654
644static int __init prm_late_init(void) 655static int __init prm_late_init(void)
645{ 656{
646 if (prm_ll_data->late_init) 657 if (prm_ll_data->late_init)
diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
index a219dc310d5d..e03d8b5c9ad0 100644
--- a/arch/arm/mach-prima2/Kconfig
+++ b/arch/arm/mach-prima2/Kconfig
@@ -27,7 +27,6 @@ config ARCH_ATLAS7
27 select CPU_V7 27 select CPU_V7
28 select HAVE_ARM_SCU if SMP 28 select HAVE_ARM_SCU if SMP
29 select HAVE_SMP 29 select HAVE_SMP
30 select SMP_ON_UP if SMP
31 help 30 help
32 Support for CSR SiRFSoC ARM Cortex A7 Platform 31 Support for CSR SiRFSoC ARM Cortex A7 Platform
33 32
diff --git a/arch/arm/mach-prima2/common.c b/arch/arm/mach-prima2/common.c
index 0c819bb88418..8cadb302a7d2 100644
--- a/arch/arm/mach-prima2/common.c
+++ b/arch/arm/mach-prima2/common.c
@@ -21,7 +21,7 @@ static void __init sirfsoc_init_late(void)
21} 21}
22 22
23#ifdef CONFIG_ARCH_ATLAS6 23#ifdef CONFIG_ARCH_ATLAS6
24static const char *atlas6_dt_match[] __initconst = { 24static const char *const atlas6_dt_match[] __initconst = {
25 "sirf,atlas6", 25 "sirf,atlas6",
26 NULL 26 NULL
27}; 27};
@@ -36,7 +36,7 @@ MACHINE_END
36#endif 36#endif
37 37
38#ifdef CONFIG_ARCH_PRIMA2 38#ifdef CONFIG_ARCH_PRIMA2
39static const char *prima2_dt_match[] __initconst = { 39static const char *const prima2_dt_match[] __initconst = {
40 "sirf,prima2", 40 "sirf,prima2",
41 NULL 41 NULL
42}; 42};
@@ -52,7 +52,7 @@ MACHINE_END
52#endif 52#endif
53 53
54#ifdef CONFIG_ARCH_ATLAS7 54#ifdef CONFIG_ARCH_ATLAS7
55static const char *atlas7_dt_match[] __initdata = { 55static const char *const atlas7_dt_match[] __initconst = {
56 "sirf,atlas7", 56 "sirf,atlas7",
57 NULL 57 NULL
58}; 58};
diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c
index fc2b03c81e5f..e46c91094dde 100644
--- a/arch/arm/mach-prima2/platsmp.c
+++ b/arch/arm/mach-prima2/platsmp.c
@@ -40,7 +40,7 @@ static void sirfsoc_secondary_init(unsigned int cpu)
40 spin_unlock(&boot_lock); 40 spin_unlock(&boot_lock);
41} 41}
42 42
43static struct of_device_id clk_ids[] = { 43static const struct of_device_id clk_ids[] = {
44 { .compatible = "sirf,atlas7-clkc" }, 44 { .compatible = "sirf,atlas7-clkc" },
45 {}, 45 {},
46}; 46};
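The of_device_id constifications in this series (gic_match, the PRM match tables, clk_ids, and ux500_pm_domain_matches below) all follow the same rule: OF match tables are only ever read, so they can live in rodata. The resulting pattern, with a hypothetical compatible string:

	static const struct of_device_id foo_match[] = {
		{ .compatible = "vendor,foo" },	/* hypothetical */
		{ /* sentinel */ },
	};

Tables used only at init time can additionally be marked __initconst, as ux500_pm_domain_matches is.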
diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c
index 343c4e3a7c5d..7d8eab857a93 100644
--- a/arch/arm/mach-pxa/idp.c
+++ b/arch/arm/mach-pxa/idp.c
@@ -81,11 +81,16 @@ static struct resource smc91x_resources[] = {
81 } 81 }
82}; 82};
83 83
84static struct smc91x_platdata smc91x_platdata = {
85 .flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT,
86};
87
84static struct platform_device smc91x_device = { 88static struct platform_device smc91x_device = {
85 .name = "smc91x", 89 .name = "smc91x",
86 .id = 0, 90 .id = 0,
87 .num_resources = ARRAY_SIZE(smc91x_resources), 91 .num_resources = ARRAY_SIZE(smc91x_resources),
88 .resource = smc91x_resources, 92 .resource = smc91x_resources,
93 .dev.platform_data = &smc91x_platdata,
89}; 94};
90 95
91static void idp_backlight_power(int on) 96static void idp_backlight_power(int on)
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c
index ad777b353bd5..28da319d389f 100644
--- a/arch/arm/mach-pxa/lpd270.c
+++ b/arch/arm/mach-pxa/lpd270.c
@@ -24,6 +24,7 @@
24#include <linux/mtd/mtd.h> 24#include <linux/mtd/mtd.h>
25#include <linux/mtd/partitions.h> 25#include <linux/mtd/partitions.h>
26#include <linux/pwm_backlight.h> 26#include <linux/pwm_backlight.h>
27#include <linux/smc91x.h>
27 28
28#include <asm/types.h> 29#include <asm/types.h>
29#include <asm/setup.h> 30#include <asm/setup.h>
@@ -189,15 +190,20 @@ static struct resource smc91x_resources[] = {
189 [1] = { 190 [1] = {
190 .start = LPD270_ETHERNET_IRQ, 191 .start = LPD270_ETHERNET_IRQ,
191 .end = LPD270_ETHERNET_IRQ, 192 .end = LPD270_ETHERNET_IRQ,
192 .flags = IORESOURCE_IRQ, 193 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
193 }, 194 },
194}; 195};
195 196
197 static struct smc91x_platdata smc91x_platdata = {
198 	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
199};
200
196static struct platform_device smc91x_device = { 201static struct platform_device smc91x_device = {
197 .name = "smc91x", 202 .name = "smc91x",
198 .id = 0, 203 .id = 0,
199 .num_resources = ARRAY_SIZE(smc91x_resources), 204 .num_resources = ARRAY_SIZE(smc91x_resources),
200 .resource = smc91x_resources, 205 .resource = smc91x_resources,
206 .dev.platform_data = &smc91x_platdata,
201}; 207};
202 208
203static struct resource lpd270_flash_resources[] = { 209static struct resource lpd270_flash_resources[] = {
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index 850e506926df..c309593abdb2 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -28,6 +28,7 @@
28#include <linux/platform_data/video-clcd-versatile.h> 28#include <linux/platform_data/video-clcd-versatile.h>
29#include <linux/io.h> 29#include <linux/io.h>
30#include <linux/smsc911x.h> 30#include <linux/smsc911x.h>
31#include <linux/smc91x.h>
31#include <linux/ata_platform.h> 32#include <linux/ata_platform.h>
32#include <linux/amba/mmci.h> 33#include <linux/amba/mmci.h>
33#include <linux/gfp.h> 34#include <linux/gfp.h>
@@ -94,6 +95,10 @@ static struct smsc911x_platform_config smsc911x_config = {
94 .phy_interface = PHY_INTERFACE_MODE_MII, 95 .phy_interface = PHY_INTERFACE_MODE_MII,
95}; 96};
96 97
98static struct smc91x_platdata smc91x_platdata = {
99 .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
100};
101
97static struct platform_device realview_eth_device = { 102static struct platform_device realview_eth_device = {
98 .name = "smsc911x", 103 .name = "smsc911x",
99 .id = 0, 104 .id = 0,
@@ -107,6 +112,8 @@ int realview_eth_register(const char *name, struct resource *res)
107 realview_eth_device.resource = res; 112 realview_eth_device.resource = res;
108 if (strcmp(realview_eth_device.name, "smsc911x") == 0) 113 if (strcmp(realview_eth_device.name, "smsc911x") == 0)
109 realview_eth_device.dev.platform_data = &smsc911x_config; 114 realview_eth_device.dev.platform_data = &smsc911x_config;
115 else
116 realview_eth_device.dev.platform_data = &smc91x_platdata;
110 117
111 return platform_device_register(&realview_eth_device); 118 return platform_device_register(&realview_eth_device);
112} 119}
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index 64c88d657f9e..b3869cbbcc68 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -234,7 +234,7 @@ static struct resource realview_eb_eth_resources[] = {
234 [1] = { 234 [1] = {
235 .start = IRQ_EB_ETH, 235 .start = IRQ_EB_ETH,
236 .end = IRQ_EB_ETH, 236 .end = IRQ_EB_ETH,
237 .flags = IORESOURCE_IRQ, 237 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
238 }, 238 },
239}; 239};
240 240
diff --git a/arch/arm/mach-rockchip/Kconfig b/arch/arm/mach-rockchip/Kconfig
index 5078932c1683..ae4eb7cc4bcc 100644
--- a/arch/arm/mach-rockchip/Kconfig
+++ b/arch/arm/mach-rockchip/Kconfig
@@ -11,6 +11,7 @@ config ARCH_ROCKCHIP
11 select HAVE_ARM_SCU if SMP 11 select HAVE_ARM_SCU if SMP
12 select HAVE_ARM_TWD if SMP 12 select HAVE_ARM_TWD if SMP
13 select DW_APB_TIMER_OF 13 select DW_APB_TIMER_OF
14 select REGULATOR if PM
14 select ROCKCHIP_TIMER 15 select ROCKCHIP_TIMER
15 select ARM_GLOBAL_TIMER 16 select ARM_GLOBAL_TIMER
16 select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK 17 select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
diff --git a/arch/arm/mach-rockchip/pm.h b/arch/arm/mach-rockchip/pm.h
index 7d752ff39f91..7c889c04604b 100644
--- a/arch/arm/mach-rockchip/pm.h
+++ b/arch/arm/mach-rockchip/pm.h
@@ -24,7 +24,13 @@ extern unsigned long rkpm_bootdata_ddr_data;
24extern unsigned long rk3288_bootram_sz; 24extern unsigned long rk3288_bootram_sz;
25 25
26void rockchip_slp_cpu_resume(void); 26void rockchip_slp_cpu_resume(void);
27#ifdef CONFIG_PM_SLEEP
27void __init rockchip_suspend_init(void); 28void __init rockchip_suspend_init(void);
29#else
30static inline void rockchip_suspend_init(void)
31{
32}
33#endif
28 34
29/****** following is rk3288 defined **********/ 35/****** following is rk3288 defined **********/
30#define RK3288_PMU_WAKEUP_CFG0 0x00 36#define RK3288_PMU_WAKEUP_CFG0 0x00
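The pm.h hunk applies the usual config-stub idiom: when CONFIG_PM_SLEEP is off, a static inline no-op stands in for the real function, so call sites stay free of #ifdefs and the compiler discards the call entirely. Generically (names hypothetical):

	#ifdef CONFIG_FEATURE_X
	void feature_x_init(void);
	#else
	static inline void feature_x_init(void) { }
	#endif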
diff --git a/arch/arm/mach-s5pv210/s5pv210.c b/arch/arm/mach-s5pv210/s5pv210.c
index 43eb1eaea0c9..83e656ea95ae 100644
--- a/arch/arm/mach-s5pv210/s5pv210.c
+++ b/arch/arm/mach-s5pv210/s5pv210.c
@@ -63,7 +63,7 @@ static void __init s5pv210_dt_init_late(void)
63 s5pv210_pm_init(); 63 s5pv210_pm_init();
64} 64}
65 65
66static char const *s5pv210_dt_compat[] __initconst = { 66static char const *const s5pv210_dt_compat[] __initconst = {
67 "samsung,s5pc110", 67 "samsung,s5pc110",
68 "samsung,s5pv210", 68 "samsung,s5pv210",
69 NULL 69 NULL
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index 169262e3040d..7b0cd3172354 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -12,6 +12,7 @@
12#include <linux/pm.h> 12#include <linux/pm.h>
13#include <linux/serial_core.h> 13#include <linux/serial_core.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/smc91x.h>
15 16
16#include <asm/mach-types.h> 17#include <asm/mach-types.h>
17#include <asm/mach/map.h> 18#include <asm/mach/map.h>
@@ -258,12 +259,17 @@ static int neponset_probe(struct platform_device *dev)
258 0x02000000, "smc91x-attrib"), 259 0x02000000, "smc91x-attrib"),
259 { .flags = IORESOURCE_IRQ }, 260 { .flags = IORESOURCE_IRQ },
260 }; 261 };
262 struct smc91x_platdata smc91x_platdata = {
263 .flags = SMC91X_USE_8BIT | SMC91X_IO_SHIFT_2 | SMC91X_NOWAIT,
264 };
261 struct platform_device_info smc91x_devinfo = { 265 struct platform_device_info smc91x_devinfo = {
262 .parent = &dev->dev, 266 .parent = &dev->dev,
263 .name = "smc91x", 267 .name = "smc91x",
264 .id = 0, 268 .id = 0,
265 .res = smc91x_resources, 269 .res = smc91x_resources,
266 .num_res = ARRAY_SIZE(smc91x_resources), 270 .num_res = ARRAY_SIZE(smc91x_resources),
271 		.data = &smc91x_platdata,
272 		.size_data = sizeof(smc91x_platdata),
267 }; 273 };
268 int ret, irq; 274 int ret, irq;
269 275
diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c
index 091261878eff..696fd0fe4806 100644
--- a/arch/arm/mach-sa1100/pleb.c
+++ b/arch/arm/mach-sa1100/pleb.c
@@ -11,6 +11,7 @@
11#include <linux/irq.h> 11#include <linux/irq.h>
12#include <linux/io.h> 12#include <linux/io.h>
13#include <linux/mtd/partitions.h> 13#include <linux/mtd/partitions.h>
14#include <linux/smc91x.h>
14 15
15#include <mach/hardware.h> 16#include <mach/hardware.h>
16#include <asm/setup.h> 17#include <asm/setup.h>
@@ -43,12 +44,18 @@ static struct resource smc91x_resources[] = {
43#endif 44#endif
44}; 45};
45 46
47static struct smc91x_platdata smc91x_platdata = {
48 .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
49};
46 50
47static struct platform_device smc91x_device = { 51static struct platform_device smc91x_device = {
48 .name = "smc91x", 52 .name = "smc91x",
49 .id = 0, 53 .id = 0,
50 .num_resources = ARRAY_SIZE(smc91x_resources), 54 .num_resources = ARRAY_SIZE(smc91x_resources),
51 .resource = smc91x_resources, 55 .resource = smc91x_resources,
56 .dev = {
57 		.platform_data = &smc91x_platdata,
58 },
52}; 59};
53 60
54static struct platform_device *devices[] __initdata = { 61static struct platform_device *devices[] __initdata = {
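The smc91x board changes above (idp, lpd270, realview, neponset, pleb) all feed one driver-side switch: bus width, DMA and wait-state behaviour move from compile-time machine checks into a struct smc91x_platdata that each board supplies. A probe function would read the flags back along these lines (a sketch, not the driver's exact code):

	static int smc91x_probe_sketch(struct platform_device *pdev)
	{
		struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev);

		if (pd && (pd->flags & SMC91X_USE_16BIT))
			; /* configure 16-bit bus accesses */
		if (pd && (pd->flags & SMC91X_NOWAIT))
			; /* do not insert bus wait states */
		return 0;
	}

The IORESOURCE_IRQ_HIGHEDGE flags added to the IRQ resources follow the same idea: interrupt polarity becomes a board property instead of driver policy.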
diff --git a/arch/arm/mach-shmobile/setup-emev2.c b/arch/arm/mach-shmobile/setup-emev2.c
index aad97be9cbe1..37f7b15c01bc 100644
--- a/arch/arm/mach-shmobile/setup-emev2.c
+++ b/arch/arm/mach-shmobile/setup-emev2.c
@@ -37,7 +37,7 @@ static void __init emev2_map_io(void)
37 iotable_init(emev2_io_desc, ARRAY_SIZE(emev2_io_desc)); 37 iotable_init(emev2_io_desc, ARRAY_SIZE(emev2_io_desc));
38} 38}
39 39
40static const char *emev2_boards_compat_dt[] __initconst = { 40static const char *const emev2_boards_compat_dt[] __initconst = {
41 "renesas,emev2", 41 "renesas,emev2",
42 NULL, 42 NULL,
43}; 43};
diff --git a/arch/arm/mach-sti/Kconfig b/arch/arm/mach-sti/Kconfig
index 8825bc9e2553..3b1ac463a494 100644
--- a/arch/arm/mach-sti/Kconfig
+++ b/arch/arm/mach-sti/Kconfig
@@ -13,6 +13,7 @@ menuconfig ARCH_STI
13 select ARM_ERRATA_775420 13 select ARM_ERRATA_775420
14 select PL310_ERRATA_753970 if CACHE_L2X0 14 select PL310_ERRATA_753970 if CACHE_L2X0
15 select PL310_ERRATA_769419 if CACHE_L2X0 15 select PL310_ERRATA_769419 if CACHE_L2X0
16 select RESET_CONTROLLER
16 help 17 help
17 Include support for STiH41x SOCs like STiH415/416 using the device tree 18 Include support for STiH41x SOCs like STiH415/416 using the device tree
18 for discovery 19 for discovery
diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c
index ef016af1c9e7..914341bcef25 100644
--- a/arch/arm/mach-tegra/tegra.c
+++ b/arch/arm/mach-tegra/tegra.c
@@ -91,8 +91,6 @@ static void __init tegra_dt_init(void)
91 struct soc_device *soc_dev; 91 struct soc_device *soc_dev;
92 struct device *parent = NULL; 92 struct device *parent = NULL;
93 93
94 tegra_clocks_apply_init_table();
95
96 soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); 94 soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
97 if (!soc_dev_attr) 95 if (!soc_dev_attr)
98 goto out; 96 goto out;
diff --git a/arch/arm/mach-ux500/pm_domains.c b/arch/arm/mach-ux500/pm_domains.c
index 0d4b5b46f15b..4d71c90f801c 100644
--- a/arch/arm/mach-ux500/pm_domains.c
+++ b/arch/arm/mach-ux500/pm_domains.c
@@ -49,7 +49,7 @@ static struct generic_pm_domain *ux500_pm_domains[NR_DOMAINS] = {
49 [DOMAIN_VAPE] = &ux500_pm_domain_vape, 49 [DOMAIN_VAPE] = &ux500_pm_domain_vape,
50}; 50};
51 51
52static struct of_device_id ux500_pm_domain_matches[] = { 52static const struct of_device_id ux500_pm_domain_matches[] __initconst = {
53 { .compatible = "stericsson,ux500-pm-domains", }, 53 { .compatible = "stericsson,ux500-pm-domains", },
54 { }, 54 { },
55}; 55};
diff --git a/arch/arm/mach-versatile/versatile_dt.c b/arch/arm/mach-versatile/versatile_dt.c
index 9f9bc61ca64b..7de3e92a13b0 100644
--- a/arch/arm/mach-versatile/versatile_dt.c
+++ b/arch/arm/mach-versatile/versatile_dt.c
@@ -35,7 +35,7 @@ static void __init versatile_dt_init(void)
35 versatile_auxdata_lookup, NULL); 35 versatile_auxdata_lookup, NULL);
36} 36}
37 37
38static const char *versatile_dt_match[] __initconst = { 38static const char *const versatile_dt_match[] __initconst = {
39 "arm,versatile-ab", 39 "arm,versatile-ab",
40 "arm,versatile-pb", 40 "arm,versatile-pb",
41 NULL, 41 NULL,
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index d6b16d9a7838..3c2509b4b694 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -73,6 +73,7 @@ config ARCH_VEXPRESS_TC2_PM
73 depends on MCPM 73 depends on MCPM
74 select ARM_CCI 74 select ARM_CCI
75 select ARCH_VEXPRESS_SPC 75 select ARCH_VEXPRESS_SPC
76 select ARM_CPU_SUSPEND
76 help 77 help
77 Support for CPU and cluster power management on Versatile Express 78 Support for CPU and cluster power management on Versatile Express
78 with a TC2 (A15x2 A7x3) big.LITTLE core tile. 79 with a TC2 (A15x2 A7x3) big.LITTLE core tile.
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c43c71455566..9b4f29e595a4 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -892,13 +892,6 @@ config CACHE_L2X0
892 892
893if CACHE_L2X0 893if CACHE_L2X0
894 894
895config CACHE_PL310
896 bool
897 default y if CPU_V7 && !(CPU_V6 || CPU_V6K)
898 help
899 This option enables optimisations for the PL310 cache
900 controller.
901
902config PL310_ERRATA_588369 895config PL310_ERRATA_588369
903 bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines" 896 bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
904 help 897 help
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 903dba064a03..170a116d1b29 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1106,7 +1106,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
1106 int i = 0; 1106 int i = 0;
1107 1107
1108 if (array_size <= PAGE_SIZE) 1108 if (array_size <= PAGE_SIZE)
1109 pages = kzalloc(array_size, gfp); 1109 pages = kzalloc(array_size, GFP_KERNEL);
1110 else 1110 else
1111 pages = vzalloc(array_size); 1111 pages = vzalloc(array_size);
1112 if (!pages) 1112 if (!pages)
diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dts b/arch/arm64/boot/dts/arm/foundation-v8.dts
index 27f32962e55c..4eac8dcea423 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8.dts
+++ b/arch/arm64/boot/dts/arm/foundation-v8.dts
@@ -34,6 +34,7 @@
34 reg = <0x0 0x0>; 34 reg = <0x0 0x0>;
35 enable-method = "spin-table"; 35 enable-method = "spin-table";
36 cpu-release-addr = <0x0 0x8000fff8>; 36 cpu-release-addr = <0x0 0x8000fff8>;
37 next-level-cache = <&L2_0>;
37 }; 38 };
38 cpu@1 { 39 cpu@1 {
39 device_type = "cpu"; 40 device_type = "cpu";
@@ -41,6 +42,7 @@
41 reg = <0x0 0x1>; 42 reg = <0x0 0x1>;
42 enable-method = "spin-table"; 43 enable-method = "spin-table";
43 cpu-release-addr = <0x0 0x8000fff8>; 44 cpu-release-addr = <0x0 0x8000fff8>;
45 next-level-cache = <&L2_0>;
44 }; 46 };
45 cpu@2 { 47 cpu@2 {
46 device_type = "cpu"; 48 device_type = "cpu";
@@ -48,6 +50,7 @@
48 reg = <0x0 0x2>; 50 reg = <0x0 0x2>;
49 enable-method = "spin-table"; 51 enable-method = "spin-table";
50 cpu-release-addr = <0x0 0x8000fff8>; 52 cpu-release-addr = <0x0 0x8000fff8>;
53 next-level-cache = <&L2_0>;
51 }; 54 };
52 cpu@3 { 55 cpu@3 {
53 device_type = "cpu"; 56 device_type = "cpu";
@@ -55,6 +58,11 @@
55 reg = <0x0 0x3>; 58 reg = <0x0 0x3>;
56 enable-method = "spin-table"; 59 enable-method = "spin-table";
57 cpu-release-addr = <0x0 0x8000fff8>; 60 cpu-release-addr = <0x0 0x8000fff8>;
61 next-level-cache = <&L2_0>;
62 };
63
64 L2_0: l2-cache0 {
65 compatible = "cache";
58 }; 66 };
59 }; 67 };
60 68
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
index d429129ecb3d..133ee59de2d7 100644
--- a/arch/arm64/boot/dts/arm/juno.dts
+++ b/arch/arm64/boot/dts/arm/juno.dts
@@ -39,6 +39,7 @@
39 reg = <0x0 0x0>; 39 reg = <0x0 0x0>;
40 device_type = "cpu"; 40 device_type = "cpu";
41 enable-method = "psci"; 41 enable-method = "psci";
42 next-level-cache = <&A57_L2>;
42 }; 43 };
43 44
44 A57_1: cpu@1 { 45 A57_1: cpu@1 {
@@ -46,6 +47,7 @@
46 reg = <0x0 0x1>; 47 reg = <0x0 0x1>;
47 device_type = "cpu"; 48 device_type = "cpu";
48 enable-method = "psci"; 49 enable-method = "psci";
50 next-level-cache = <&A57_L2>;
49 }; 51 };
50 52
51 A53_0: cpu@100 { 53 A53_0: cpu@100 {
@@ -53,6 +55,7 @@
53 reg = <0x0 0x100>; 55 reg = <0x0 0x100>;
54 device_type = "cpu"; 56 device_type = "cpu";
55 enable-method = "psci"; 57 enable-method = "psci";
58 next-level-cache = <&A53_L2>;
56 }; 59 };
57 60
58 A53_1: cpu@101 { 61 A53_1: cpu@101 {
@@ -60,6 +63,7 @@
60 reg = <0x0 0x101>; 63 reg = <0x0 0x101>;
61 device_type = "cpu"; 64 device_type = "cpu";
62 enable-method = "psci"; 65 enable-method = "psci";
66 next-level-cache = <&A53_L2>;
63 }; 67 };
64 68
65 A53_2: cpu@102 { 69 A53_2: cpu@102 {
@@ -67,6 +71,7 @@
67 reg = <0x0 0x102>; 71 reg = <0x0 0x102>;
68 device_type = "cpu"; 72 device_type = "cpu";
69 enable-method = "psci"; 73 enable-method = "psci";
74 next-level-cache = <&A53_L2>;
70 }; 75 };
71 76
72 A53_3: cpu@103 { 77 A53_3: cpu@103 {
@@ -74,6 +79,15 @@
74 reg = <0x0 0x103>; 79 reg = <0x0 0x103>;
75 device_type = "cpu"; 80 device_type = "cpu";
76 enable-method = "psci"; 81 enable-method = "psci";
82 next-level-cache = <&A53_L2>;
83 };
84
85 A57_L2: l2-cache0 {
86 compatible = "cache";
87 };
88
89 A53_L2: l2-cache1 {
90 compatible = "cache";
77 }; 91 };
78 }; 92 };
79 93
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
index efc59b3baf63..20addabbd127 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
@@ -37,6 +37,7 @@
37 reg = <0x0 0x0>; 37 reg = <0x0 0x0>;
38 enable-method = "spin-table"; 38 enable-method = "spin-table";
39 cpu-release-addr = <0x0 0x8000fff8>; 39 cpu-release-addr = <0x0 0x8000fff8>;
40 next-level-cache = <&L2_0>;
40 }; 41 };
41 cpu@1 { 42 cpu@1 {
42 device_type = "cpu"; 43 device_type = "cpu";
@@ -44,6 +45,7 @@
44 reg = <0x0 0x1>; 45 reg = <0x0 0x1>;
45 enable-method = "spin-table"; 46 enable-method = "spin-table";
46 cpu-release-addr = <0x0 0x8000fff8>; 47 cpu-release-addr = <0x0 0x8000fff8>;
48 next-level-cache = <&L2_0>;
47 }; 49 };
48 cpu@2 { 50 cpu@2 {
49 device_type = "cpu"; 51 device_type = "cpu";
@@ -51,6 +53,7 @@
51 reg = <0x0 0x2>; 53 reg = <0x0 0x2>;
52 enable-method = "spin-table"; 54 enable-method = "spin-table";
53 cpu-release-addr = <0x0 0x8000fff8>; 55 cpu-release-addr = <0x0 0x8000fff8>;
56 next-level-cache = <&L2_0>;
54 }; 57 };
55 cpu@3 { 58 cpu@3 {
56 device_type = "cpu"; 59 device_type = "cpu";
@@ -58,6 +61,11 @@
58 reg = <0x0 0x3>; 61 reg = <0x0 0x3>;
59 enable-method = "spin-table"; 62 enable-method = "spin-table";
60 cpu-release-addr = <0x0 0x8000fff8>; 63 cpu-release-addr = <0x0 0x8000fff8>;
64 next-level-cache = <&L2_0>;
65 };
66
67 L2_0: l2-cache0 {
68 compatible = "cache";
61 }; 69 };
62 }; 70 };
63 71
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index 5720608c50b1..abb79b3cfcfe 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -29,7 +29,7 @@ aes-ce-blk-y := aes-glue-ce.o aes-ce.o
29obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o 29obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o
30aes-neon-blk-y := aes-glue-neon.o aes-neon.o 30aes-neon-blk-y := aes-glue-neon.o aes-neon.o
31 31
32AFLAGS_aes-ce.o := -DINTERLEAVE=2 -DINTERLEAVE_INLINE 32AFLAGS_aes-ce.o := -DINTERLEAVE=4
33AFLAGS_aes-neon.o := -DINTERLEAVE=4 33AFLAGS_aes-neon.o := -DINTERLEAVE=4
34 34
35CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS 35CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 5901480bfdca..750bac4e637e 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -20,6 +20,9 @@
20#error "Only include this from assembly code" 20#error "Only include this from assembly code"
21#endif 21#endif
22 22
23#ifndef __ASM_ASSEMBLER_H
24#define __ASM_ASSEMBLER_H
25
23#include <asm/ptrace.h> 26#include <asm/ptrace.h>
24#include <asm/thread_info.h> 27#include <asm/thread_info.h>
25 28
@@ -155,3 +158,5 @@ lr .req x30 // link register
155#endif 158#endif
156 orr \rd, \lbits, \hbits, lsl #32 159 orr \rd, \lbits, \hbits, lsl #32
157 .endm 160 .endm
161
162#endif /* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
index 0710654631e7..c60643f14cda 100644
--- a/arch/arm64/include/asm/cpuidle.h
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -1,6 +1,8 @@
1#ifndef __ASM_CPUIDLE_H 1#ifndef __ASM_CPUIDLE_H
2#define __ASM_CPUIDLE_H 2#define __ASM_CPUIDLE_H
3 3
4#include <asm/proc-fns.h>
5
4#ifdef CONFIG_CPU_IDLE 6#ifdef CONFIG_CPU_IDLE
5extern int cpu_init_idle(unsigned int cpu); 7extern int cpu_init_idle(unsigned int cpu);
6extern int cpu_suspend(unsigned long arg); 8extern int cpu_suspend(unsigned long arg);
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index e2ff32a93b5c..d2f49423c5dc 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -264,8 +264,10 @@ __AARCH64_INSN_FUNCS(ands, 0x7F200000, 0x6A000000)
264__AARCH64_INSN_FUNCS(bics, 0x7F200000, 0x6A200000) 264__AARCH64_INSN_FUNCS(bics, 0x7F200000, 0x6A200000)
265__AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) 265__AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000)
266__AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) 266__AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000)
267__AARCH64_INSN_FUNCS(cbz, 0xFE000000, 0x34000000) 267__AARCH64_INSN_FUNCS(cbz, 0x7F000000, 0x34000000)
268__AARCH64_INSN_FUNCS(cbnz, 0xFE000000, 0x35000000) 268__AARCH64_INSN_FUNCS(cbnz, 0x7F000000, 0x35000000)
269__AARCH64_INSN_FUNCS(tbz, 0x7F000000, 0x36000000)
270__AARCH64_INSN_FUNCS(tbnz, 0x7F000000, 0x37000000)
269__AARCH64_INSN_FUNCS(bcond, 0xFF000010, 0x54000000) 271__AARCH64_INSN_FUNCS(bcond, 0xFF000010, 0x54000000)
270__AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001) 272__AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001)
271__AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002) 273__AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002)
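The insn.h fix concerns bit 31 of the A64 compare-and-branch encodings, which is the sf register-width field rather than part of the opcode, so the match mask must be 0x7F000000, not 0xFE000000, to accept both the w- and x-register forms; tbz/tbnz use bit 31 as b5 (the top bit of the tested bit number) and get the same mask. A self-contained check with hand-encoded example instructions:

	#include <stdint.h>
	#include <stdio.h>

	#define MATCHES(insn, mask, val) (((insn) & (mask)) == (val))

	int main(void)
	{
		uint32_t cbz_w = 0x34000040;	/* cbz w0, #8 (sf = 0) */
		uint32_t cbz_x = 0xb4000040;	/* cbz x0, #8 (sf = 1) */

		/* old mask 0xFE000000: prints 1 0 -- misses the x form */
		printf("%d %d\n", MATCHES(cbz_w, 0xFE000000, 0x34000000),
				  MATCHES(cbz_x, 0xFE000000, 0x34000000));
		/* new mask 0x7F000000: prints 1 1 */
		printf("%d %d\n", MATCHES(cbz_w, 0x7F000000, 0x34000000),
				  MATCHES(cbz_x, 0x7F000000, 0x34000000));
		return 0;
	}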
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 16449c535e50..800ec0e87ed9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -460,7 +460,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
460static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 460static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
461{ 461{
462 const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | 462 const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
463 PTE_PROT_NONE | PTE_VALID | PTE_WRITE; 463 PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
464 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); 464 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
465 return pte; 465 return pte;
466} 466}
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index f9be30ea1cbd..20e9591a60cf 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -45,7 +45,8 @@
45#define STACK_TOP STACK_TOP_MAX 45#define STACK_TOP STACK_TOP_MAX
46#endif /* CONFIG_COMPAT */ 46#endif /* CONFIG_COMPAT */
47 47
48#define ARCH_LOW_ADDRESS_LIMIT PHYS_MASK 48extern phys_addr_t arm64_dma_phys_limit;
49#define ARCH_LOW_ADDRESS_LIMIT (arm64_dma_phys_limit - 1)
49#endif /* __KERNEL__ */ 50#endif /* __KERNEL__ */
50 51
51struct debug_info { 52struct debug_info {
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 73f0ce570fb3..4abe9b945f77 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -24,11 +24,6 @@
24#include <linux/sched.h> 24#include <linux/sched.h>
25#include <asm/cputype.h> 25#include <asm/cputype.h>
26 26
27extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
28extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);
29
30extern struct cpu_tlb_fns cpu_tlb;
31
32/* 27/*
33 * TLB Management 28 * TLB Management
34 * ============== 29 * ==============
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 3bf8f4e99a51..07e1ba449bf1 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -63,7 +63,7 @@ static inline void set_fs(mm_segment_t fs)
63 current_thread_info()->addr_limit = fs; 63 current_thread_info()->addr_limit = fs;
64} 64}
65 65
66#define segment_eq(a,b) ((a) == (b)) 66#define segment_eq(a, b) ((a) == (b))
67 67
68/* 68/*
69 * Return 1 if addr < current->addr_limit, 0 otherwise. 69 * Return 1 if addr < current->addr_limit, 0 otherwise.
@@ -147,7 +147,7 @@ do { \
147 default: \ 147 default: \
148 BUILD_BUG(); \ 148 BUILD_BUG(); \
149 } \ 149 } \
150 (x) = (__typeof__(*(ptr)))__gu_val; \ 150 (x) = (__force __typeof__(*(ptr)))__gu_val; \
151} while (0) 151} while (0)
152 152
153#define __get_user(x, ptr) \ 153#define __get_user(x, ptr) \
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index bef04afd6031..5ee07eee80c2 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -15,8 +15,9 @@ CFLAGS_REMOVE_return_address.o = -pg
15arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ 15arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
16 entry-fpsimd.o process.o ptrace.o setup.o signal.o \ 16 entry-fpsimd.o process.o ptrace.o setup.o signal.o \
17 sys.o stacktrace.o time.o traps.o io.o vdso.o \ 17 sys.o stacktrace.o time.o traps.o io.o vdso.o \
18 hyp-stub.o psci.o cpu_ops.o insn.o return_address.o \ 18 hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \
19 cpuinfo.o cpu_errata.o alternative.o cacheinfo.o 19 return_address.o cpuinfo.o cpu_errata.o \
20 alternative.o cacheinfo.o
20 21
21arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ 22arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
22 sys_compat.o entry32.o \ 23 sys_compat.o entry32.o \
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index cf8556ae09d0..c851be795080 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -156,7 +156,7 @@ static int ftrace_modify_graph_caller(bool enable)
156 156
157 branch = aarch64_insn_gen_branch_imm(pc, 157 branch = aarch64_insn_gen_branch_imm(pc,
158 (unsigned long)ftrace_graph_caller, 158 (unsigned long)ftrace_graph_caller,
159 AARCH64_INSN_BRANCH_LINK); 159 AARCH64_INSN_BRANCH_NOLINK);
160 nop = aarch64_insn_gen_nop(); 160 nop = aarch64_insn_gen_nop();
161 161
162 if (enable) 162 if (enable)
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 27d4864577e5..c8eca88f12e6 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -87,8 +87,10 @@ static void __kprobes *patch_map(void *addr, int fixmap)
87 87
88 if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX)) 88 if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
89 page = vmalloc_to_page(addr); 89 page = vmalloc_to_page(addr);
90 else 90 else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
91 page = virt_to_page(addr); 91 page = virt_to_page(addr);
92 else
93 return addr;
92 94
93 BUG_ON(!page); 95 BUG_ON(!page);
94 set_fixmap(fixmap, page_to_phys(page)); 96 set_fixmap(fixmap, page_to_phys(page));
diff --git a/arch/arm64/kernel/psci-call.S b/arch/arm64/kernel/psci-call.S
new file mode 100644
index 000000000000..cf83e61cd3b5
--- /dev/null
+++ b/arch/arm64/kernel/psci-call.S
@@ -0,0 +1,28 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2015 ARM Limited
12 *
13 * Author: Will Deacon <will.deacon@arm.com>
14 */
15
16#include <linux/linkage.h>
17
18/* int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
19ENTRY(__invoke_psci_fn_hvc)
20 hvc #0
21 ret
22ENDPROC(__invoke_psci_fn_hvc)
23
24/* int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
25ENTRY(__invoke_psci_fn_smc)
26 smc #0
27 ret
28ENDPROC(__invoke_psci_fn_smc)
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 3425f311c49e..9b8a70ae64a1 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -57,6 +57,9 @@ static struct psci_operations psci_ops;
57static int (*invoke_psci_fn)(u64, u64, u64, u64); 57static int (*invoke_psci_fn)(u64, u64, u64, u64);
58typedef int (*psci_initcall_t)(const struct device_node *); 58typedef int (*psci_initcall_t)(const struct device_node *);
59 59
60asmlinkage int __invoke_psci_fn_hvc(u64, u64, u64, u64);
61asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64);
62
60enum psci_function { 63enum psci_function {
61 PSCI_FN_CPU_SUSPEND, 64 PSCI_FN_CPU_SUSPEND,
62 PSCI_FN_CPU_ON, 65 PSCI_FN_CPU_ON,
@@ -109,40 +112,6 @@ static void psci_power_state_unpack(u32 power_state,
109 PSCI_0_2_POWER_STATE_AFFL_SHIFT; 112 PSCI_0_2_POWER_STATE_AFFL_SHIFT;
110} 113}
111 114
112/*
113 * The following two functions are invoked via the invoke_psci_fn pointer
114 * and will not be inlined, allowing us to piggyback on the AAPCS.
115 */
116static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1,
117 u64 arg2)
118{
119 asm volatile(
120 __asmeq("%0", "x0")
121 __asmeq("%1", "x1")
122 __asmeq("%2", "x2")
123 __asmeq("%3", "x3")
124 "hvc #0\n"
125 : "+r" (function_id)
126 : "r" (arg0), "r" (arg1), "r" (arg2));
127
128 return function_id;
129}
130
131static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1,
132 u64 arg2)
133{
134 asm volatile(
135 __asmeq("%0", "x0")
136 __asmeq("%1", "x1")
137 __asmeq("%2", "x2")
138 __asmeq("%3", "x3")
139 "smc #0\n"
140 : "+r" (function_id)
141 : "r" (arg0), "r" (arg1), "r" (arg2));
142
143 return function_id;
144}
145
146static int psci_get_version(void) 115static int psci_get_version(void)
147{ 116{
148 int err; 117 int err;
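The PSCI rework moves the conduit calls out of inline assembly, which depended on the compiler honouring the __asmeq register hints, into real ENTRY stubs in psci-call.S where the x0-x3 placement required by the SMC calling convention is guaranteed. Conduit selection stays in C and wires up the function pointer roughly like this (simplified from the probing code, error handling omitted):

	const char *method;

	if (!of_property_read_string(np, "method", &method)) {
		if (!strcmp(method, "smc"))
			invoke_psci_fn = __invoke_psci_fn_smc;
		else if (!strcmp(method, "hvc"))
			invoke_psci_fn = __invoke_psci_fn_hvc;
	}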
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index c20a300e2213..d26fcd4cd6e6 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -154,8 +154,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
154 case __SI_TIMER: 154 case __SI_TIMER:
155 err |= __put_user(from->si_tid, &to->si_tid); 155 err |= __put_user(from->si_tid, &to->si_tid);
156 err |= __put_user(from->si_overrun, &to->si_overrun); 156 err |= __put_user(from->si_overrun, &to->si_overrun);
157 err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, 157 err |= __put_user(from->si_int, &to->si_int);
158 &to->si_ptr);
159 break; 158 break;
160 case __SI_POLL: 159 case __SI_POLL:
161 err |= __put_user(from->si_band, &to->si_band); 160 err |= __put_user(from->si_band, &to->si_band);
@@ -184,7 +183,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
184 case __SI_MESGQ: /* But this is */ 183 case __SI_MESGQ: /* But this is */
185 err |= __put_user(from->si_pid, &to->si_pid); 184 err |= __put_user(from->si_pid, &to->si_pid);
186 err |= __put_user(from->si_uid, &to->si_uid); 185 err |= __put_user(from->si_uid, &to->si_uid);
187 err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr); 186 err |= __put_user(from->si_int, &to->si_int);
188 break; 187 break;
189 case __SI_SYS: 188 case __SI_SYS:
190 err |= __put_user((compat_uptr_t)(unsigned long) 189 err |= __put_user((compat_uptr_t)(unsigned long)
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index fe652ffd34c2..efa79e8d4196 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -174,8 +174,6 @@ ENDPROC(__kernel_clock_gettime)
174/* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */ 174/* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */
175ENTRY(__kernel_clock_getres) 175ENTRY(__kernel_clock_getres)
176 .cfi_startproc 176 .cfi_startproc
177 cbz w1, 3f
178
179 cmp w0, #CLOCK_REALTIME 177 cmp w0, #CLOCK_REALTIME
180 ccmp w0, #CLOCK_MONOTONIC, #0x4, ne 178 ccmp w0, #CLOCK_MONOTONIC, #0x4, ne
181 b.ne 1f 179 b.ne 1f
@@ -188,6 +186,7 @@ ENTRY(__kernel_clock_getres)
188 b.ne 4f 186 b.ne 4f
189 ldr x2, 6f 187 ldr x2, 6f
1902: 1882:
189 cbz w1, 3f
191 stp xzr, x2, [x1] 190 stp xzr, x2, [x1]
192 191
1933: /* res == NULL. */ 1923: /* res == NULL. */
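The vdso fix moves the res == NULL test after clock-id validation: clock_getres() must return -EINVAL for an unknown clock even when res is NULL, and the old early cbz w1, 3f returned success in that case. The corrected control flow as a C sketch (the actual set of clock ids handled is in the asm above; resolution constants hypothetical):

	int clock_getres_sketch(clockid_t id, struct timespec *res)
	{
		long nsec;

		if (id == CLOCK_REALTIME || id == CLOCK_MONOTONIC)
			nsec = HIGH_RES_NSEC;	/* hypothetical */
		else if (id == CLOCK_REALTIME_COARSE || id == CLOCK_MONOTONIC_COARSE)
			nsec = LOW_RES_NSEC;	/* hypothetical */
		else
			return -EINVAL;		/* validate even if res == NULL */

		if (res) {
			res->tv_sec = 0;
			res->tv_nsec = nsec;
		}
		return 0;
	}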
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 0a24b9b8c698..58e0c2bdde04 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -348,8 +348,6 @@ static struct dma_map_ops swiotlb_dma_ops = {
348 .mapping_error = swiotlb_dma_mapping_error, 348 .mapping_error = swiotlb_dma_mapping_error,
349}; 349};
350 350
351extern int swiotlb_late_init_with_default_size(size_t default_size);
352
353static int __init atomic_pool_init(void) 351static int __init atomic_pool_init(void)
354{ 352{
355 pgprot_t prot = __pgprot(PROT_NORMAL_NC); 353 pgprot_t prot = __pgprot(PROT_NORMAL_NC);
@@ -411,21 +409,13 @@ out:
411 return -ENOMEM; 409 return -ENOMEM;
412} 410}
413 411
414static int __init swiotlb_late_init(void) 412static int __init arm64_dma_init(void)
415{ 413{
416 size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT); 414 int ret;
417 415
418 dma_ops = &swiotlb_dma_ops; 416 dma_ops = &swiotlb_dma_ops;
419 417
420 return swiotlb_late_init_with_default_size(swiotlb_size); 418 ret = atomic_pool_init();
421}
422
423static int __init arm64_dma_init(void)
424{
425 int ret = 0;
426
427 ret |= swiotlb_late_init();
428 ret |= atomic_pool_init();
429 419
430 return ret; 420 return ret;
431} 421}
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 71145f952070..ae85da6307bb 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -33,6 +33,7 @@
33#include <linux/dma-mapping.h> 33#include <linux/dma-mapping.h>
34#include <linux/dma-contiguous.h> 34#include <linux/dma-contiguous.h>
35#include <linux/efi.h> 35#include <linux/efi.h>
36#include <linux/swiotlb.h>
36 37
37#include <asm/fixmap.h> 38#include <asm/fixmap.h>
38#include <asm/memory.h> 39#include <asm/memory.h>
@@ -45,6 +46,7 @@
45#include "mm.h" 46#include "mm.h"
46 47
47phys_addr_t memstart_addr __read_mostly = 0; 48phys_addr_t memstart_addr __read_mostly = 0;
49phys_addr_t arm64_dma_phys_limit __read_mostly;
48 50
49#ifdef CONFIG_BLK_DEV_INITRD 51#ifdef CONFIG_BLK_DEV_INITRD
50static int __init early_initrd(char *p) 52static int __init early_initrd(char *p)
@@ -85,7 +87,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
85 87
86 /* 4GB maximum for 32-bit only capable devices */ 88 /* 4GB maximum for 32-bit only capable devices */
87 if (IS_ENABLED(CONFIG_ZONE_DMA)) { 89 if (IS_ENABLED(CONFIG_ZONE_DMA)) {
88 max_dma = PFN_DOWN(max_zone_dma_phys()); 90 max_dma = PFN_DOWN(arm64_dma_phys_limit);
89 zone_size[ZONE_DMA] = max_dma - min; 91 zone_size[ZONE_DMA] = max_dma - min;
90 } 92 }
91 zone_size[ZONE_NORMAL] = max - max_dma; 93 zone_size[ZONE_NORMAL] = max - max_dma;
@@ -156,8 +158,6 @@ early_param("mem", early_mem);
156 158
157void __init arm64_memblock_init(void) 159void __init arm64_memblock_init(void)
158{ 160{
159 phys_addr_t dma_phys_limit = 0;
160
161 memblock_enforce_memory_limit(memory_limit); 161 memblock_enforce_memory_limit(memory_limit);
162 162
163 /* 163 /*
@@ -174,8 +174,10 @@ void __init arm64_memblock_init(void)
174 174
175 /* 4GB maximum for 32-bit only capable devices */ 175 /* 4GB maximum for 32-bit only capable devices */
176 if (IS_ENABLED(CONFIG_ZONE_DMA)) 176 if (IS_ENABLED(CONFIG_ZONE_DMA))
177 dma_phys_limit = max_zone_dma_phys(); 177 arm64_dma_phys_limit = max_zone_dma_phys();
178 dma_contiguous_reserve(dma_phys_limit); 178 else
179 arm64_dma_phys_limit = PHYS_MASK + 1;
180 dma_contiguous_reserve(arm64_dma_phys_limit);
179 181
180 memblock_allow_resize(); 182 memblock_allow_resize();
181 memblock_dump_all(); 183 memblock_dump_all();
@@ -276,6 +278,8 @@ static void __init free_unused_memmap(void)
276 */ 278 */
277void __init mem_init(void) 279void __init mem_init(void)
278{ 280{
281 swiotlb_init(1);
282
279 set_max_mapnr(pfn_to_page(max_pfn) - mem_map); 283 set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
280 284
281#ifndef CONFIG_SPARSEMEM_VMEMMAP 285#ifndef CONFIG_SPARSEMEM_VMEMMAP
diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h
index 245b2ee213c9..a46f7cf3e1ea 100644
--- a/arch/avr32/include/asm/uaccess.h
+++ b/arch/avr32/include/asm/uaccess.h
@@ -26,7 +26,7 @@ typedef struct {
26 * For historical reasons (Data Segment Register?), these macros are misnamed. 26 * For historical reasons (Data Segment Register?), these macros are misnamed.
27 */ 27 */
28#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) 28#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
29#define segment_eq(a,b) ((a).is_user_space == (b).is_user_space) 29#define segment_eq(a, b) ((a).is_user_space == (b).is_user_space)
30 30
31#define USER_ADDR_LIMIT 0x80000000 31#define USER_ADDR_LIMIT 0x80000000
32 32
@@ -108,8 +108,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
108 * 108 *
109 * Returns zero on success, or -EFAULT on error. 109 * Returns zero on success, or -EFAULT on error.
110 */ 110 */
111#define put_user(x,ptr) \ 111#define put_user(x, ptr) \
112 __put_user_check((x),(ptr),sizeof(*(ptr))) 112 __put_user_check((x), (ptr), sizeof(*(ptr)))
113 113
114/* 114/*
115 * get_user: - Get a simple variable from user space. 115 * get_user: - Get a simple variable from user space.
@@ -128,8 +128,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
128 * Returns zero on success, or -EFAULT on error. 128 * Returns zero on success, or -EFAULT on error.
129 * On error, the variable @x is set to zero. 129 * On error, the variable @x is set to zero.
130 */ 130 */
131#define get_user(x,ptr) \ 131#define get_user(x, ptr) \
132 __get_user_check((x),(ptr),sizeof(*(ptr))) 132 __get_user_check((x), (ptr), sizeof(*(ptr)))
133 133
134/* 134/*
135 * __put_user: - Write a simple value into user space, with less checking. 135 * __put_user: - Write a simple value into user space, with less checking.
@@ -150,8 +150,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
150 * 150 *
151 * Returns zero on success, or -EFAULT on error. 151 * Returns zero on success, or -EFAULT on error.
152 */ 152 */
153#define __put_user(x,ptr) \ 153#define __put_user(x, ptr) \
154 __put_user_nocheck((x),(ptr),sizeof(*(ptr))) 154 __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
155 155
156/* 156/*
157 * __get_user: - Get a simple variable from user space, with less checking. 157 * __get_user: - Get a simple variable from user space, with less checking.
@@ -173,8 +173,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
173 * Returns zero on success, or -EFAULT on error. 173 * Returns zero on success, or -EFAULT on error.
174 * On error, the variable @x is set to zero. 174 * On error, the variable @x is set to zero.
175 */ 175 */
176#define __get_user(x,ptr) \ 176#define __get_user(x, ptr) \
177 __get_user_nocheck((x),(ptr),sizeof(*(ptr))) 177 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
178 178
179extern int __get_user_bad(void); 179extern int __get_user_bad(void);
180extern int __put_user_bad(void); 180extern int __put_user_bad(void);
@@ -191,7 +191,7 @@ extern int __put_user_bad(void);
191 default: __gu_err = __get_user_bad(); break; \ 191 default: __gu_err = __get_user_bad(); break; \
192 } \ 192 } \
193 \ 193 \
194 x = (typeof(*(ptr)))__gu_val; \ 194 x = (__force typeof(*(ptr)))__gu_val; \
195 __gu_err; \ 195 __gu_err; \
196}) 196})
197 197
@@ -222,7 +222,7 @@ extern int __put_user_bad(void);
222 } else { \ 222 } else { \
223 __gu_err = -EFAULT; \ 223 __gu_err = -EFAULT; \
224 } \ 224 } \
225 x = (typeof(*(ptr)))__gu_val; \ 225 x = (__force typeof(*(ptr)))__gu_val; \
226 __gu_err; \ 226 __gu_err; \
227}) 227})
228 228
@@ -278,7 +278,7 @@ extern int __put_user_bad(void);
278 __pu_err); \ 278 __pu_err); \
279 break; \ 279 break; \
280 case 8: \ 280 case 8: \
281 __put_user_asm("d", __pu_addr, __pu_val, \ 281 __put_user_asm("d", __pu_addr, __pu_val, \
282 __pu_err); \ 282 __pu_err); \
283 break; \ 283 break; \
284 default: \ 284 default: \
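The avr32 uaccess changes mirror the arm64 ones earlier in this diff: get_user() funnels every load through an unsigned long scratch variable, and converting that back to the target's type may cross a sparse __bitwise annotation, so the cast is marked __force to say the conversion is intentional. In miniature:

	/* inside a get_user()-style macro: */
	unsigned long __gu_val = 0;	/* raw value loaded from user space */
	__le32 x;			/* a sparse __bitwise type */

	x = (__force __le32)__gu_val;	/* intentional conversion: no sparse warning */

Without __force, sparse flags the cast to the restricted type; the comma-spacing changes in the same hunks are purely coding-style fixes.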
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index cc92cdb9994c..1d8b147282cf 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -607,7 +607,7 @@ static struct dw_dma_platform_data dw_dmac0_data = {
607 .nr_channels = 3, 607 .nr_channels = 3,
608 .block_size = 4095U, 608 .block_size = 4095U,
609 .nr_masters = 2, 609 .nr_masters = 2,
610 .data_width = { 2, 2, 0, 0 }, 610 .data_width = { 2, 2 },
611}; 611};
612 612
613static struct resource dw_dmac0_resource[] = { 613static struct resource dw_dmac0_resource[] = {
diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
index 57701c3b8a59..90612a7f2cf3 100644
--- a/arch/blackfin/include/asm/uaccess.h
+++ b/arch/blackfin/include/asm/uaccess.h
@@ -27,7 +27,7 @@ static inline void set_fs(mm_segment_t fs)
27 current_thread_info()->addr_limit = fs; 27 current_thread_info()->addr_limit = fs;
28} 28}
29 29
30#define segment_eq(a,b) ((a) == (b)) 30#define segment_eq(a, b) ((a) == (b))
31 31
32#define VERIFY_READ 0 32#define VERIFY_READ 0
33#define VERIFY_WRITE 1 33#define VERIFY_WRITE 1
@@ -68,11 +68,11 @@ struct exception_table_entry {
68 * use the right size if we just have the right pointer type. 68 * use the right size if we just have the right pointer type.
69 */ 69 */
70 70
71#define put_user(x,p) \ 71#define put_user(x, p) \
72 ({ \ 72 ({ \
73 int _err = 0; \ 73 int _err = 0; \
74 typeof(*(p)) _x = (x); \ 74 typeof(*(p)) _x = (x); \
75 typeof(*(p)) __user *_p = (p); \ 75 typeof(*(p)) __user *_p = (p); \
76 if (!access_ok(VERIFY_WRITE, _p, sizeof(*(_p)))) {\ 76 if (!access_ok(VERIFY_WRITE, _p, sizeof(*(_p)))) {\
77 _err = -EFAULT; \ 77 _err = -EFAULT; \
78 } \ 78 } \
@@ -89,10 +89,10 @@ struct exception_table_entry {
89 break; \ 89 break; \
90 case 8: { \ 90 case 8: { \
91 long _xl, _xh; \ 91 long _xl, _xh; \
92 _xl = ((long *)&_x)[0]; \ 92 _xl = ((__force long *)&_x)[0]; \
93 _xh = ((long *)&_x)[1]; \ 93 _xh = ((__force long *)&_x)[1]; \
94 __put_user_asm(_xl, ((long __user *)_p)+0, ); \ 94 __put_user_asm(_xl, ((__force long __user *)_p)+0, );\
95 __put_user_asm(_xh, ((long __user *)_p)+1, ); \ 95 __put_user_asm(_xh, ((__force long __user *)_p)+1, );\
96 } break; \ 96 } break; \
97 default: \ 97 default: \
98 _err = __put_user_bad(); \ 98 _err = __put_user_bad(); \
@@ -102,7 +102,7 @@ struct exception_table_entry {
102 _err; \ 102 _err; \
103 }) 103 })
104 104
105#define __put_user(x,p) put_user(x,p) 105#define __put_user(x, p) put_user(x, p)
106static inline int bad_user_access_length(void) 106static inline int bad_user_access_length(void)
107{ 107{
108 panic("bad_user_access_length"); 108 panic("bad_user_access_length");
@@ -121,10 +121,10 @@ static inline int bad_user_access_length(void)
121 121
122#define __ptr(x) ((unsigned long __force *)(x)) 122#define __ptr(x) ((unsigned long __force *)(x))
123 123
124#define __put_user_asm(x,p,bhw) \ 124#define __put_user_asm(x, p, bhw) \
125 __asm__ (#bhw"[%1] = %0;\n\t" \ 125 __asm__ (#bhw"[%1] = %0;\n\t" \
126 : /* no outputs */ \ 126 : /* no outputs */ \
127 :"d" (x),"a" (__ptr(p)) : "memory") 127 :"d" (x), "a" (__ptr(p)) : "memory")
128 128
129#define get_user(x, ptr) \ 129#define get_user(x, ptr) \
130({ \ 130({ \
@@ -136,10 +136,10 @@ static inline int bad_user_access_length(void)
136 BUILD_BUG_ON(ptr_size >= 8); \ 136 BUILD_BUG_ON(ptr_size >= 8); \
137 switch (ptr_size) { \ 137 switch (ptr_size) { \
138 case 1: \ 138 case 1: \
139 __get_user_asm(_val, _p, B,(Z)); \ 139 __get_user_asm(_val, _p, B, (Z)); \
140 break; \ 140 break; \
141 case 2: \ 141 case 2: \
142 __get_user_asm(_val, _p, W,(Z)); \ 142 __get_user_asm(_val, _p, W, (Z)); \
143 break; \ 143 break; \
144 case 4: \ 144 case 4: \
145 __get_user_asm(_val, _p, , ); \ 145 __get_user_asm(_val, _p, , ); \
@@ -147,11 +147,11 @@ static inline int bad_user_access_length(void)
147 } \ 147 } \
148 } else \ 148 } else \
149 _err = -EFAULT; \ 149 _err = -EFAULT; \
150 x = (typeof(*(ptr)))_val; \ 150 x = (__force typeof(*(ptr)))_val; \
151 _err; \ 151 _err; \
152}) 152})
153 153
154#define __get_user(x,p) get_user(x,p) 154#define __get_user(x, p) get_user(x, p)
155 155
156#define __get_user_bad() (bad_user_access_length(), (-EFAULT)) 156#define __get_user_bad() (bad_user_access_length(), (-EFAULT))
157 157
@@ -168,10 +168,10 @@ static inline int bad_user_access_length(void)
168#define __copy_to_user_inatomic __copy_to_user 168#define __copy_to_user_inatomic __copy_to_user
169#define __copy_from_user_inatomic __copy_from_user 169#define __copy_from_user_inatomic __copy_from_user
170 170
171#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n))\ 171#define copy_to_user_ret(to, from, n, retval) ({ if (copy_to_user(to, from, n))\
172 return retval; }) 172 return retval; })
173 173
174#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n))\ 174#define copy_from_user_ret(to, from, n, retval) ({ if (copy_from_user(to, from, n))\
175 return retval; }) 175 return retval; })
176 176
177static inline unsigned long __must_check 177static inline unsigned long __must_check
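For context on the *_ret wrappers above: copy_to_user()/copy_from_user() return the number of bytes that could not be copied, so any non-zero result means a fault. A hedged sketch of the usual call pattern in driver code (all names here are illustrative):

    static long sketch_read_state(struct sketch_dev *dev, void __user *arg)
    {
            struct sketch_state st;

            sketch_fill_state(dev, &st);            /* hypothetical helper */
            /* non-zero return = partial copy => report -EFAULT */
            if (copy_to_user(arg, &st, sizeof(st)))
                    return -EFAULT;
            return 0;
    }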
diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c
index 9501bd8d9cd1..68f2a8a806ea 100644
--- a/arch/blackfin/mach-bf527/boards/ad7160eval.c
+++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c
@@ -666,7 +666,14 @@ static struct platform_device bfin_sport1_uart_device = {
666#endif 666#endif
667 667
668#if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY) 668#if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
669#include <asm/bfin_rotary.h> 669#include <linux/platform_data/bfin_rotary.h>
670
671static const u16 per_cnt[] = {
672 P_CNT_CUD,
673 P_CNT_CDG,
674 P_CNT_CZM,
675 0
676};
670 677
671static struct bfin_rotary_platform_data bfin_rotary_data = { 678static struct bfin_rotary_platform_data bfin_rotary_data = {
672 /*.rotary_up_key = KEY_UP,*/ 679 /*.rotary_up_key = KEY_UP,*/
@@ -676,10 +683,16 @@ static struct bfin_rotary_platform_data bfin_rotary_data = {
676 .debounce = 10, /* 0..17 */ 683 .debounce = 10, /* 0..17 */
677 .mode = ROT_QUAD_ENC | ROT_DEBE, 684 .mode = ROT_QUAD_ENC | ROT_DEBE,
678 .pm_wakeup = 1, 685 .pm_wakeup = 1,
686 .pin_list = per_cnt,
679}; 687};
680 688
681static struct resource bfin_rotary_resources[] = { 689static struct resource bfin_rotary_resources[] = {
682 { 690 {
691 .start = CNT_CONFIG,
692 .end = CNT_CONFIG + 0xff,
693 .flags = IORESOURCE_MEM,
694 },
695 {
683 .start = IRQ_CNT, 696 .start = IRQ_CNT,
684 .end = IRQ_CNT, 697 .end = IRQ_CNT,
685 .flags = IORESOURCE_IRQ, 698 .flags = IORESOURCE_IRQ,
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index d64f565dc2a0..d4219e8e5ab8 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -1092,7 +1092,14 @@ static struct platform_device bfin_device_gpiokeys = {
1092#endif 1092#endif
1093 1093
1094#if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY) 1094#if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
1095#include <asm/bfin_rotary.h> 1095#include <linux/platform_data/bfin_rotary.h>
1096
1097static const u16 per_cnt[] = {
1098 P_CNT_CUD,
1099 P_CNT_CDG,
1100 P_CNT_CZM,
1101 0
1102};
1096 1103
1097static struct bfin_rotary_platform_data bfin_rotary_data = { 1104static struct bfin_rotary_platform_data bfin_rotary_data = {
1098 /*.rotary_up_key = KEY_UP,*/ 1105 /*.rotary_up_key = KEY_UP,*/
@@ -1102,10 +1109,16 @@ static struct bfin_rotary_platform_data bfin_rotary_data = {
1102 .debounce = 10, /* 0..17 */ 1109 .debounce = 10, /* 0..17 */
1103 .mode = ROT_QUAD_ENC | ROT_DEBE, 1110 .mode = ROT_QUAD_ENC | ROT_DEBE,
1104 .pm_wakeup = 1, 1111 .pm_wakeup = 1,
1112 .pin_list = per_cnt,
1105}; 1113};
1106 1114
1107static struct resource bfin_rotary_resources[] = { 1115static struct resource bfin_rotary_resources[] = {
1108 { 1116 {
1117 .start = CNT_CONFIG,
1118 .end = CNT_CONFIG + 0xff,
1119 .flags = IORESOURCE_MEM,
1120 },
1121 {
1109 .start = IRQ_CNT, 1122 .start = IRQ_CNT,
1110 .end = IRQ_CNT, 1123 .end = IRQ_CNT,
1111 .flags = IORESOURCE_IRQ, 1124 .flags = IORESOURCE_IRQ,
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 1fe7ff286619..4204b9842532 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -159,7 +159,7 @@ static struct platform_device bf54x_kpad_device = {
159#endif 159#endif
160 160
161#if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY) 161#if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
162#include <asm/bfin_rotary.h> 162#include <linux/platform_data/bfin_rotary.h>
163 163
164static struct bfin_rotary_platform_data bfin_rotary_data = { 164static struct bfin_rotary_platform_data bfin_rotary_data = {
165 /*.rotary_up_key = KEY_UP,*/ 165 /*.rotary_up_key = KEY_UP,*/
@@ -173,6 +173,11 @@ static struct bfin_rotary_platform_data bfin_rotary_data = {
173 173
174static struct resource bfin_rotary_resources[] = { 174static struct resource bfin_rotary_resources[] = {
175 { 175 {
176 .start = CNT_CONFIG,
177 .end = CNT_CONFIG + 0xff,
178 .flags = IORESOURCE_MEM,
179 },
180 {
176 .start = IRQ_CNT, 181 .start = IRQ_CNT,
177 .end = IRQ_CNT, 182 .end = IRQ_CNT,
178 .flags = IORESOURCE_IRQ, 183 .flags = IORESOURCE_IRQ,
diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c
index e2c0b024ce88..7f9fc272ec30 100644
--- a/arch/blackfin/mach-bf609/boards/ezkit.c
+++ b/arch/blackfin/mach-bf609/boards/ezkit.c
@@ -75,7 +75,7 @@ static struct platform_device bfin_isp1760_device = {
75#endif 75#endif
76 76
77#if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY) 77#if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
78#include <asm/bfin_rotary.h> 78#include <linux/platform_data/bfin_rotary.h>
79 79
80static struct bfin_rotary_platform_data bfin_rotary_data = { 80static struct bfin_rotary_platform_data bfin_rotary_data = {
81 /*.rotary_up_key = KEY_UP,*/ 81 /*.rotary_up_key = KEY_UP,*/
@@ -88,6 +88,11 @@ static struct bfin_rotary_platform_data bfin_rotary_data = {
88 88
89static struct resource bfin_rotary_resources[] = { 89static struct resource bfin_rotary_resources[] = {
90 { 90 {
91 .start = CNT_CONFIG,
92 .end = CNT_CONFIG + 0xff,
93 .flags = IORESOURCE_MEM,
94 },
95 {
91 .start = IRQ_CNT, 96 .start = IRQ_CNT,
92 .end = IRQ_CNT, 97 .end = IRQ_CNT,
93 .flags = IORESOURCE_IRQ, 98 .flags = IORESOURCE_IRQ,
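All four Blackfin board files gain the same IORESOURCE_MEM entry (CNT_CONFIG through CNT_CONFIG + 0xff) ahead of the existing IRQ resource, matching the driver's move from <asm/bfin_rotary.h> to shared platform data. On the driver side the block would be claimed roughly like this (a sketch of the generic platform-device pattern, not the actual bfin_rotary probe code):

    static int sketch_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(&pdev->dev, res); /* CNT register block */
            if (IS_ERR(base))
                    return PTR_ERR(base);
            /* ... program counter mode, debounce, etc. ... */
            return 0;
    }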
diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h
index 93bcf2abd1a1..07d7a7ef8bd5 100644
--- a/arch/frv/include/asm/pgtable.h
+++ b/arch/frv/include/asm/pgtable.h
@@ -123,12 +123,14 @@ extern unsigned long empty_zero_page;
123#define PGDIR_MASK (~(PGDIR_SIZE - 1)) 123#define PGDIR_MASK (~(PGDIR_SIZE - 1))
124#define PTRS_PER_PGD 64 124#define PTRS_PER_PGD 64
125 125
126#define __PAGETABLE_PUD_FOLDED
126#define PUD_SHIFT 26 127#define PUD_SHIFT 26
127#define PTRS_PER_PUD 1 128#define PTRS_PER_PUD 1
128#define PUD_SIZE (1UL << PUD_SHIFT) 129#define PUD_SIZE (1UL << PUD_SHIFT)
129#define PUD_MASK (~(PUD_SIZE - 1)) 130#define PUD_MASK (~(PUD_SIZE - 1))
130#define PUE_SIZE 256 131#define PUE_SIZE 256
131 132
133#define __PAGETABLE_PMD_FOLDED
132#define PMD_SHIFT 26 134#define PMD_SHIFT 26
133#define PMD_SIZE (1UL << PMD_SHIFT) 135#define PMD_SIZE (1UL << PMD_SHIFT)
134#define PMD_MASK (~(PMD_SIZE - 1)) 136#define PMD_MASK (~(PMD_SIZE - 1))
diff --git a/arch/frv/include/asm/segment.h b/arch/frv/include/asm/segment.h
index a2320a4a0042..4377c89a57f5 100644
--- a/arch/frv/include/asm/segment.h
+++ b/arch/frv/include/asm/segment.h
@@ -31,7 +31,7 @@ typedef struct {
31 31
32#define get_ds() (KERNEL_DS) 32#define get_ds() (KERNEL_DS)
33#define get_fs() (__current_thread_info->addr_limit) 33#define get_fs() (__current_thread_info->addr_limit)
34#define segment_eq(a,b) ((a).seg == (b).seg) 34#define segment_eq(a, b) ((a).seg == (b).seg)
35#define __kernel_ds_p() segment_eq(get_fs(), KERNEL_DS) 35#define __kernel_ds_p() segment_eq(get_fs(), KERNEL_DS)
36#define get_addr_limit() (get_fs().seg) 36#define get_addr_limit() (get_fs().seg)
37 37
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index 103bedc59644..4f3fb6ccbf21 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -169,10 +169,11 @@ do { \
169 (err) = ia64_getreg(_IA64_REG_R8); \ 169 (err) = ia64_getreg(_IA64_REG_R8); \
170 (val) = ia64_getreg(_IA64_REG_R9); \ 170 (val) = ia64_getreg(_IA64_REG_R9); \
171} while (0) 171} while (0)
172# define __put_user_size(val, addr, n, err) \ 172# define __put_user_size(val, addr, n, err) \
173do { \ 173do { \
174 __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, (unsigned long) (val)); \ 174 __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, \
175 (err) = ia64_getreg(_IA64_REG_R8); \ 175 (__force unsigned long) (val)); \
176 (err) = ia64_getreg(_IA64_REG_R8); \
176} while (0) 177} while (0)
177#endif /* !ASM_SUPPORTED */ 178#endif /* !ASM_SUPPORTED */
178 179
@@ -197,7 +198,7 @@ extern void __get_user_unknown (void);
197 case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break; \ 198 case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break; \
198 default: __get_user_unknown(); break; \ 199 default: __get_user_unknown(); break; \
199 } \ 200 } \
200 (x) = (__typeof__(*(__gu_ptr))) __gu_val; \ 201 (x) = (__force __typeof__(*(__gu_ptr))) __gu_val; \
201 __gu_err; \ 202 __gu_err; \
202}) 203})
203 204
diff --git a/arch/m32r/include/asm/pgtable-2level.h b/arch/m32r/include/asm/pgtable-2level.h
index 8fd8ee70266a..421e6ba3a173 100644
--- a/arch/m32r/include/asm/pgtable-2level.h
+++ b/arch/m32r/include/asm/pgtable-2level.h
@@ -13,6 +13,7 @@
13 * the M32R is two-level, so we don't really have any 13 * the M32R is two-level, so we don't really have any
14 * PMD directory physically. 14 * PMD directory physically.
15 */ 15 */
16#define __PAGETABLE_PMD_FOLDED
16#define PMD_SHIFT 22 17#define PMD_SHIFT 22
17#define PTRS_PER_PMD 1 18#define PTRS_PER_PMD 1
18 19
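Defining __PAGETABLE_PMD_FOLDED (and, on frv, __PAGETABLE_PUD_FOLDED) advertises to generic mm code that the level is only a compile-time fiction, so per-level page-table accounting can be compiled out. The consumer side looks roughly like this sketch of the guard pattern (assumed shape, not a quote from mm/):

    static inline void sketch_inc_nr_pmds(struct mm_struct *mm)
    {
    #ifndef __PAGETABLE_PMD_FOLDED
            atomic_long_inc(&mm->nr_pmds);  /* only count real PMD pages */
    #endif
    }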
diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h
index 84fe7ba53035..71adff209405 100644
--- a/arch/m32r/include/asm/uaccess.h
+++ b/arch/m32r/include/asm/uaccess.h
@@ -54,7 +54,7 @@ static inline void set_fs(mm_segment_t s)
54 54
55#endif /* not CONFIG_MMU */ 55#endif /* not CONFIG_MMU */
56 56
57#define segment_eq(a,b) ((a).seg == (b).seg) 57#define segment_eq(a, b) ((a).seg == (b).seg)
58 58
59#define __addr_ok(addr) \ 59#define __addr_ok(addr) \
60 ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg)) 60 ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
@@ -68,7 +68,7 @@ static inline void set_fs(mm_segment_t s)
68 * 68 *
69 * This needs 33-bit arithmetic. We have a carry... 69 * This needs 33-bit arithmetic. We have a carry...
70 */ 70 */
71#define __range_ok(addr,size) ({ \ 71#define __range_ok(addr, size) ({ \
72 unsigned long flag, roksum; \ 72 unsigned long flag, roksum; \
73 __chk_user_ptr(addr); \ 73 __chk_user_ptr(addr); \
74 asm ( \ 74 asm ( \
@@ -103,7 +103,7 @@ static inline void set_fs(mm_segment_t s)
103 * this function, memory access functions may still return -EFAULT. 103 * this function, memory access functions may still return -EFAULT.
104 */ 104 */
105#ifdef CONFIG_MMU 105#ifdef CONFIG_MMU
106#define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0)) 106#define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0))
107#else 107#else
108static inline int access_ok(int type, const void *addr, unsigned long size) 108static inline int access_ok(int type, const void *addr, unsigned long size)
109{ 109{
@@ -167,8 +167,8 @@ extern int fixup_exception(struct pt_regs *regs);
167 * Returns zero on success, or -EFAULT on error. 167 * Returns zero on success, or -EFAULT on error.
168 * On error, the variable @x is set to zero. 168 * On error, the variable @x is set to zero.
169 */ 169 */
170#define get_user(x,ptr) \ 170#define get_user(x, ptr) \
171 __get_user_check((x),(ptr),sizeof(*(ptr))) 171 __get_user_check((x), (ptr), sizeof(*(ptr)))
172 172
173/** 173/**
174 * put_user: - Write a simple value into user space. 174 * put_user: - Write a simple value into user space.
@@ -186,8 +186,8 @@ extern int fixup_exception(struct pt_regs *regs);
186 * 186 *
187 * Returns zero on success, or -EFAULT on error. 187 * Returns zero on success, or -EFAULT on error.
188 */ 188 */
189#define put_user(x,ptr) \ 189#define put_user(x, ptr) \
190 __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) 190 __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
191 191
192/** 192/**
193 * __get_user: - Get a simple variable from user space, with less checking. 193 * __get_user: - Get a simple variable from user space, with less checking.
@@ -209,41 +209,41 @@ extern int fixup_exception(struct pt_regs *regs);
209 * Returns zero on success, or -EFAULT on error. 209 * Returns zero on success, or -EFAULT on error.
210 * On error, the variable @x is set to zero. 210 * On error, the variable @x is set to zero.
211 */ 211 */
212#define __get_user(x,ptr) \ 212#define __get_user(x, ptr) \
213 __get_user_nocheck((x),(ptr),sizeof(*(ptr))) 213 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
214 214
215#define __get_user_nocheck(x,ptr,size) \ 215#define __get_user_nocheck(x, ptr, size) \
216({ \ 216({ \
217 long __gu_err = 0; \ 217 long __gu_err = 0; \
218 unsigned long __gu_val; \ 218 unsigned long __gu_val; \
219 might_fault(); \ 219 might_fault(); \
220 __get_user_size(__gu_val,(ptr),(size),__gu_err); \ 220 __get_user_size(__gu_val, (ptr), (size), __gu_err); \
221 (x) = (__typeof__(*(ptr)))__gu_val; \ 221 (x) = (__force __typeof__(*(ptr)))__gu_val; \
222 __gu_err; \ 222 __gu_err; \
223}) 223})
224 224
225#define __get_user_check(x,ptr,size) \ 225#define __get_user_check(x, ptr, size) \
226({ \ 226({ \
227 long __gu_err = -EFAULT; \ 227 long __gu_err = -EFAULT; \
228 unsigned long __gu_val = 0; \ 228 unsigned long __gu_val = 0; \
229 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ 229 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
230 might_fault(); \ 230 might_fault(); \
231 if (access_ok(VERIFY_READ,__gu_addr,size)) \ 231 if (access_ok(VERIFY_READ, __gu_addr, size)) \
232 __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \ 232 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
233 (x) = (__typeof__(*(ptr)))__gu_val; \ 233 (x) = (__force __typeof__(*(ptr)))__gu_val; \
234 __gu_err; \ 234 __gu_err; \
235}) 235})
236 236
237extern long __get_user_bad(void); 237extern long __get_user_bad(void);
238 238
239#define __get_user_size(x,ptr,size,retval) \ 239#define __get_user_size(x, ptr, size, retval) \
240do { \ 240do { \
241 retval = 0; \ 241 retval = 0; \
242 __chk_user_ptr(ptr); \ 242 __chk_user_ptr(ptr); \
243 switch (size) { \ 243 switch (size) { \
244 case 1: __get_user_asm(x,ptr,retval,"ub"); break; \ 244 case 1: __get_user_asm(x, ptr, retval, "ub"); break; \
245 case 2: __get_user_asm(x,ptr,retval,"uh"); break; \ 245 case 2: __get_user_asm(x, ptr, retval, "uh"); break; \
246 case 4: __get_user_asm(x,ptr,retval,""); break; \ 246 case 4: __get_user_asm(x, ptr, retval, ""); break; \
247 default: (x) = __get_user_bad(); \ 247 default: (x) = __get_user_bad(); \
248 } \ 248 } \
249} while (0) 249} while (0)
@@ -288,26 +288,26 @@ do { \
288 * 288 *
289 * Returns zero on success, or -EFAULT on error. 289 * Returns zero on success, or -EFAULT on error.
290 */ 290 */
291#define __put_user(x,ptr) \ 291#define __put_user(x, ptr) \
292 __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) 292 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
293 293
294 294
295#define __put_user_nocheck(x,ptr,size) \ 295#define __put_user_nocheck(x, ptr, size) \
296({ \ 296({ \
297 long __pu_err; \ 297 long __pu_err; \
298 might_fault(); \ 298 might_fault(); \
299 __put_user_size((x),(ptr),(size),__pu_err); \ 299 __put_user_size((x), (ptr), (size), __pu_err); \
300 __pu_err; \ 300 __pu_err; \
301}) 301})
302 302
303 303
304#define __put_user_check(x,ptr,size) \ 304#define __put_user_check(x, ptr, size) \
305({ \ 305({ \
306 long __pu_err = -EFAULT; \ 306 long __pu_err = -EFAULT; \
307 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ 307 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
308 might_fault(); \ 308 might_fault(); \
309 if (access_ok(VERIFY_WRITE,__pu_addr,size)) \ 309 if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
310 __put_user_size((x),__pu_addr,(size),__pu_err); \ 310 __put_user_size((x), __pu_addr, (size), __pu_err); \
311 __pu_err; \ 311 __pu_err; \
312}) 312})
313 313
@@ -366,15 +366,15 @@ do { \
366 366
367extern void __put_user_bad(void); 367extern void __put_user_bad(void);
368 368
369#define __put_user_size(x,ptr,size,retval) \ 369#define __put_user_size(x, ptr, size, retval) \
370do { \ 370do { \
371 retval = 0; \ 371 retval = 0; \
372 __chk_user_ptr(ptr); \ 372 __chk_user_ptr(ptr); \
373 switch (size) { \ 373 switch (size) { \
374 case 1: __put_user_asm(x,ptr,retval,"b"); break; \ 374 case 1: __put_user_asm(x, ptr, retval, "b"); break; \
375 case 2: __put_user_asm(x,ptr,retval,"h"); break; \ 375 case 2: __put_user_asm(x, ptr, retval, "h"); break; \
376 case 4: __put_user_asm(x,ptr,retval,""); break; \ 376 case 4: __put_user_asm(x, ptr, retval, ""); break; \
377 case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\ 377 case 8: __put_user_u64((__typeof__(*ptr))(x), ptr, retval); break;\
378 default: __put_user_bad(); \ 378 default: __put_user_bad(); \
379 } \ 379 } \
380} while (0) 380} while (0)
@@ -421,7 +421,7 @@ struct __large_struct { unsigned long buf[100]; };
421 421
422/* Generic arbitrary sized copy. */ 422/* Generic arbitrary sized copy. */
423/* Return the number of bytes NOT copied. */ 423/* Return the number of bytes NOT copied. */
424#define __copy_user(to,from,size) \ 424#define __copy_user(to, from, size) \
425do { \ 425do { \
426 unsigned long __dst, __src, __c; \ 426 unsigned long __dst, __src, __c; \
427 __asm__ __volatile__ ( \ 427 __asm__ __volatile__ ( \
@@ -478,7 +478,7 @@ do { \
478 : "r14", "memory"); \ 478 : "r14", "memory"); \
479} while (0) 479} while (0)
480 480
481#define __copy_user_zeroing(to,from,size) \ 481#define __copy_user_zeroing(to, from, size) \
482do { \ 482do { \
483 unsigned long __dst, __src, __c; \ 483 unsigned long __dst, __src, __c; \
484 __asm__ __volatile__ ( \ 484 __asm__ __volatile__ ( \
@@ -548,14 +548,14 @@ do { \
548static inline unsigned long __generic_copy_from_user_nocheck(void *to, 548static inline unsigned long __generic_copy_from_user_nocheck(void *to,
549 const void __user *from, unsigned long n) 549 const void __user *from, unsigned long n)
550{ 550{
551 __copy_user_zeroing(to,from,n); 551 __copy_user_zeroing(to, from, n);
552 return n; 552 return n;
553} 553}
554 554
555static inline unsigned long __generic_copy_to_user_nocheck(void __user *to, 555static inline unsigned long __generic_copy_to_user_nocheck(void __user *to,
556 const void *from, unsigned long n) 556 const void *from, unsigned long n)
557{ 557{
558 __copy_user(to,from,n); 558 __copy_user(to, from, n);
559 return n; 559 return n;
560} 560}
561 561
@@ -576,8 +576,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
576 * Returns number of bytes that could not be copied. 576 * Returns number of bytes that could not be copied.
577 * On success, this will be zero. 577 * On success, this will be zero.
578 */ 578 */
579#define __copy_to_user(to,from,n) \ 579#define __copy_to_user(to, from, n) \
580 __generic_copy_to_user_nocheck((to),(from),(n)) 580 __generic_copy_to_user_nocheck((to), (from), (n))
581 581
582#define __copy_to_user_inatomic __copy_to_user 582#define __copy_to_user_inatomic __copy_to_user
583#define __copy_from_user_inatomic __copy_from_user 583#define __copy_from_user_inatomic __copy_from_user
@@ -595,10 +595,10 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
595 * Returns number of bytes that could not be copied. 595 * Returns number of bytes that could not be copied.
596 * On success, this will be zero. 596 * On success, this will be zero.
597 */ 597 */
598#define copy_to_user(to,from,n) \ 598#define copy_to_user(to, from, n) \
599({ \ 599({ \
600 might_fault(); \ 600 might_fault(); \
601 __generic_copy_to_user((to),(from),(n)); \ 601 __generic_copy_to_user((to), (from), (n)); \
602}) 602})
603 603
604/** 604/**
@@ -617,8 +617,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
617 * If some data could not be copied, this function will pad the copied 617 * If some data could not be copied, this function will pad the copied
618 * data to the requested size using zero bytes. 618 * data to the requested size using zero bytes.
619 */ 619 */
620#define __copy_from_user(to,from,n) \ 620#define __copy_from_user(to, from, n) \
621 __generic_copy_from_user_nocheck((to),(from),(n)) 621 __generic_copy_from_user_nocheck((to), (from), (n))
622 622
623/** 623/**
624 * copy_from_user: - Copy a block of data from user space. 624 * copy_from_user: - Copy a block of data from user space.
@@ -636,10 +636,10 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
636 * If some data could not be copied, this function will pad the copied 636 * If some data could not be copied, this function will pad the copied
637 * data to the requested size using zero bytes. 637 * data to the requested size using zero bytes.
638 */ 638 */
639#define copy_from_user(to,from,n) \ 639#define copy_from_user(to, from, n) \
640({ \ 640({ \
641 might_fault(); \ 641 might_fault(); \
642 __generic_copy_from_user((to),(from),(n)); \ 642 __generic_copy_from_user((to), (from), (n)); \
643}) 643})
644 644
645long __must_check strncpy_from_user(char *dst, const char __user *src, 645long __must_check strncpy_from_user(char *dst, const char __user *src,
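The kerneldoc above spells out the contract all of these helpers share: zero on success, -EFAULT on a fault, with get_user() additionally zeroing @x on error. A short usage sketch (the function name is illustrative):

    static int sketch_bump_flag(int __user *uptr)
    {
            int val;

            if (get_user(val, uptr))        /* val is zeroed on fault */
                    return -EFAULT;
            if (put_user(val + 1, uptr))
                    return -EFAULT;
            return 0;
    }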
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index 28a145bfbb71..35ed4a9981ae 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -54,10 +54,12 @@
54 */ 54 */
55#ifdef CONFIG_SUN3 55#ifdef CONFIG_SUN3
56#define PTRS_PER_PTE 16 56#define PTRS_PER_PTE 16
57#define __PAGETABLE_PMD_FOLDED
57#define PTRS_PER_PMD 1 58#define PTRS_PER_PMD 1
58#define PTRS_PER_PGD 2048 59#define PTRS_PER_PGD 2048
59#elif defined(CONFIG_COLDFIRE) 60#elif defined(CONFIG_COLDFIRE)
60#define PTRS_PER_PTE 512 61#define PTRS_PER_PTE 512
62#define __PAGETABLE_PMD_FOLDED
61#define PTRS_PER_PMD 1 63#define PTRS_PER_PMD 1
62#define PTRS_PER_PGD 1024 64#define PTRS_PER_PGD 1024
63#else 65#else
diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h
index 0fa80e97ed2d..98216b8111f0 100644
--- a/arch/m68k/include/asm/segment.h
+++ b/arch/m68k/include/asm/segment.h
@@ -58,7 +58,7 @@ static inline mm_segment_t get_ds(void)
58#define set_fs(x) (current_thread_info()->addr_limit = (x)) 58#define set_fs(x) (current_thread_info()->addr_limit = (x))
59#endif 59#endif
60 60
61#define segment_eq(a,b) ((a).seg == (b).seg) 61#define segment_eq(a, b) ((a).seg == (b).seg)
62 62
63#endif /* __ASSEMBLY__ */ 63#endif /* __ASSEMBLY__ */
64 64
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h
index 15901db435b9..d228601b3afc 100644
--- a/arch/m68k/include/asm/uaccess_mm.h
+++ b/arch/m68k/include/asm/uaccess_mm.h
@@ -128,25 +128,25 @@ asm volatile ("\n" \
128#define put_user(x, ptr) __put_user(x, ptr) 128#define put_user(x, ptr) __put_user(x, ptr)
129 129
130 130
131#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \ 131#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
132 type __gu_val; \ 132 type __gu_val; \
133 asm volatile ("\n" \ 133 asm volatile ("\n" \
134 "1: "MOVES"."#bwl" %2,%1\n" \ 134 "1: "MOVES"."#bwl" %2,%1\n" \
135 "2:\n" \ 135 "2:\n" \
136 " .section .fixup,\"ax\"\n" \ 136 " .section .fixup,\"ax\"\n" \
137 " .even\n" \ 137 " .even\n" \
138 "10: move.l %3,%0\n" \ 138 "10: move.l %3,%0\n" \
139 " sub.l %1,%1\n" \ 139 " sub.l %1,%1\n" \
140 " jra 2b\n" \ 140 " jra 2b\n" \
141 " .previous\n" \ 141 " .previous\n" \
142 "\n" \ 142 "\n" \
143 " .section __ex_table,\"a\"\n" \ 143 " .section __ex_table,\"a\"\n" \
144 " .align 4\n" \ 144 " .align 4\n" \
145 " .long 1b,10b\n" \ 145 " .long 1b,10b\n" \
146 " .previous" \ 146 " .previous" \
147 : "+d" (res), "=&" #reg (__gu_val) \ 147 : "+d" (res), "=&" #reg (__gu_val) \
148 : "m" (*(ptr)), "i" (err)); \ 148 : "m" (*(ptr)), "i" (err)); \
149 (x) = (typeof(*(ptr)))(unsigned long)__gu_val; \ 149 (x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val; \
150}) 150})
151 151
152#define __get_user(x, ptr) \ 152#define __get_user(x, ptr) \
@@ -188,7 +188,7 @@ asm volatile ("\n" \
188 "+a" (__gu_ptr) \ 188 "+a" (__gu_ptr) \
189 : "i" (-EFAULT) \ 189 : "i" (-EFAULT) \
190 : "memory"); \ 190 : "memory"); \
191 (x) = (typeof(*(ptr)))__gu_val; \ 191 (x) = (__force typeof(*(ptr)))__gu_val; \
192 break; \ 192 break; \
193 } */ \ 193 } */ \
194 default: \ 194 default: \
diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
index 881071c07942..13272fd5a5ba 100644
--- a/arch/metag/include/asm/processor.h
+++ b/arch/metag/include/asm/processor.h
@@ -149,8 +149,8 @@ extern void exit_thread(void);
149 149
150unsigned long get_wchan(struct task_struct *p); 150unsigned long get_wchan(struct task_struct *p);
151 151
152#define KSTK_EIP(tsk) ((tsk)->thread.kernel_context->CurrPC) 152#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ctx.CurrPC)
153#define KSTK_ESP(tsk) ((tsk)->thread.kernel_context->AX[0].U0) 153#define KSTK_ESP(tsk) (task_pt_regs(tsk)->ctx.AX[0].U0)
154 154
155#define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0) 155#define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0)
156 156
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 0748b0a97986..8282cbce7e39 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -107,18 +107,23 @@ extern long __put_user_asm_w(unsigned int x, void __user *addr);
107extern long __put_user_asm_d(unsigned int x, void __user *addr); 107extern long __put_user_asm_d(unsigned int x, void __user *addr);
108extern long __put_user_asm_l(unsigned long long x, void __user *addr); 108extern long __put_user_asm_l(unsigned long long x, void __user *addr);
109 109
110#define __put_user_size(x, ptr, size, retval) \ 110#define __put_user_size(x, ptr, size, retval) \
111do { \ 111do { \
112 retval = 0; \ 112 retval = 0; \
113 switch (size) { \ 113 switch (size) { \
114 case 1: \ 114 case 1: \
115 retval = __put_user_asm_b((unsigned int)x, ptr); break; \ 115 retval = __put_user_asm_b((__force unsigned int)x, ptr);\
116 break; \
116 case 2: \ 117 case 2: \
117 retval = __put_user_asm_w((unsigned int)x, ptr); break; \ 118 retval = __put_user_asm_w((__force unsigned int)x, ptr);\
119 break; \
118 case 4: \ 120 case 4: \
119 retval = __put_user_asm_d((unsigned int)x, ptr); break; \ 121 retval = __put_user_asm_d((__force unsigned int)x, ptr);\
122 break; \
120 case 8: \ 123 case 8: \
121 retval = __put_user_asm_l((unsigned long long)x, ptr); break; \ 124 retval = __put_user_asm_l((__force unsigned long long)x,\
125 ptr); \
126 break; \
122 default: \ 127 default: \
123 __put_user_bad(); \ 128 __put_user_bad(); \
124 } \ 129 } \
@@ -135,7 +140,7 @@ extern long __get_user_bad(void);
135({ \ 140({ \
136 long __gu_err, __gu_val; \ 141 long __gu_err, __gu_val; \
137 __get_user_size(__gu_val, (ptr), (size), __gu_err); \ 142 __get_user_size(__gu_val, (ptr), (size), __gu_err); \
138 (x) = (__typeof__(*(ptr)))__gu_val; \ 143 (x) = (__force __typeof__(*(ptr)))__gu_val; \
139 __gu_err; \ 144 __gu_err; \
140}) 145})
141 146
@@ -145,7 +150,7 @@ extern long __get_user_bad(void);
145 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ 150 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
146 if (access_ok(VERIFY_READ, __gu_addr, size)) \ 151 if (access_ok(VERIFY_READ, __gu_addr, size)) \
147 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ 152 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
148 (x) = (__typeof__(*(ptr)))__gu_val; \ 153 (x) = (__force __typeof__(*(ptr)))__gu_val; \
149 __gu_err; \ 154 __gu_err; \
150}) 155})
151 156
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 843713c05b79..c7a16904cd03 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -54,6 +54,7 @@ config MIPS
54 select CPU_PM if CPU_IDLE 54 select CPU_PM if CPU_IDLE
55 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST 55 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
56 select ARCH_BINFMT_ELF_STATE 56 select ARCH_BINFMT_ELF_STATE
57 select SYSCTL_EXCEPTION_TRACE
57 58
58menu "Machine selection" 59menu "Machine selection"
59 60
@@ -376,8 +377,10 @@ config MIPS_MALTA
376 select SYS_HAS_CPU_MIPS32_R1 377 select SYS_HAS_CPU_MIPS32_R1
377 select SYS_HAS_CPU_MIPS32_R2 378 select SYS_HAS_CPU_MIPS32_R2
378 select SYS_HAS_CPU_MIPS32_R3_5 379 select SYS_HAS_CPU_MIPS32_R3_5
380 select SYS_HAS_CPU_MIPS32_R6
379 select SYS_HAS_CPU_MIPS64_R1 381 select SYS_HAS_CPU_MIPS64_R1
380 select SYS_HAS_CPU_MIPS64_R2 382 select SYS_HAS_CPU_MIPS64_R2
383 select SYS_HAS_CPU_MIPS64_R6
381 select SYS_HAS_CPU_NEVADA 384 select SYS_HAS_CPU_NEVADA
382 select SYS_HAS_CPU_RM7000 385 select SYS_HAS_CPU_RM7000
383 select SYS_SUPPORTS_32BIT_KERNEL 386 select SYS_SUPPORTS_32BIT_KERNEL
@@ -1033,6 +1036,9 @@ config MIPS_MACHINE
1033config NO_IOPORT_MAP 1036config NO_IOPORT_MAP
1034 def_bool n 1037 def_bool n
1035 1038
1039config GENERIC_CSUM
1040 bool
1041
1036config GENERIC_ISA_DMA 1042config GENERIC_ISA_DMA
1037 bool 1043 bool
1038 select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n 1044 select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n
@@ -1146,6 +1152,9 @@ config SOC_PNX8335
1146 bool 1152 bool
1147 select SOC_PNX833X 1153 select SOC_PNX833X
1148 1154
1155config MIPS_SPRAM
1156 bool
1157
1149config SWAP_IO_SPACE 1158config SWAP_IO_SPACE
1150 bool 1159 bool
1151 1160
@@ -1304,6 +1313,22 @@ config CPU_MIPS32_R2
1304 specific type of processor in your system, choose that one; 1313 specific type of processor in your system, choose that one;
1305 otherwise CPU_MIPS32_R1 is a safe bet for any MIPS32 system. 1314 otherwise CPU_MIPS32_R1 is a safe bet for any MIPS32 system.
1306 1315
1316config CPU_MIPS32_R6
1317 bool "MIPS32 Release 6 (EXPERIMENTAL)"
1318 depends on SYS_HAS_CPU_MIPS32_R6
1319 select CPU_HAS_PREFETCH
1320 select CPU_SUPPORTS_32BIT_KERNEL
1321 select CPU_SUPPORTS_HIGHMEM
1322 select CPU_SUPPORTS_MSA
1323 select GENERIC_CSUM
1324 select HAVE_KVM
1325 select MIPS_O32_FP64_SUPPORT
1326 help
1327 Choose this option to build a kernel for release 6 or later of the
1328 MIPS32 architecture. New MIPS processors, starting with the Warrior
1329 family, are based on a MIPS32r6 processor. If you own an older
1330 processor, you probably need to select MIPS32r1 or MIPS32r2 instead.
1331
1307config CPU_MIPS64_R1 1332config CPU_MIPS64_R1
1308 bool "MIPS64 Release 1" 1333 bool "MIPS64 Release 1"
1309 depends on SYS_HAS_CPU_MIPS64_R1 1334 depends on SYS_HAS_CPU_MIPS64_R1
@@ -1339,6 +1364,21 @@ config CPU_MIPS64_R2
1339 specific type of processor in your system, choose that one; 1364 specific type of processor in your system, choose that one;
1340 otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system. 1365 otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system.
1341 1366
1367config CPU_MIPS64_R6
1368 bool "MIPS64 Release 6 (EXPERIMENTAL)"
1369 depends on SYS_HAS_CPU_MIPS64_R6
1370 select CPU_HAS_PREFETCH
1371 select CPU_SUPPORTS_32BIT_KERNEL
1372 select CPU_SUPPORTS_64BIT_KERNEL
1373 select CPU_SUPPORTS_HIGHMEM
1374 select CPU_SUPPORTS_MSA
1375 select GENERIC_CSUM
1376 help
1377 Choose this option to build a kernel for release 6 or later of the
1378 MIPS64 architecture. New MIPS processors, starting with the Warrior
1379 family, are based on a MIPS64r6 processor. If you own an older
1380 processor, you probably need to select MIPS64r1 or MIPS64r2 instead.
1381
1342config CPU_R3000 1382config CPU_R3000
1343 bool "R3000" 1383 bool "R3000"
1344 depends on SYS_HAS_CPU_R3000 1384 depends on SYS_HAS_CPU_R3000
@@ -1539,7 +1579,7 @@ endchoice
1539config CPU_MIPS32_3_5_FEATURES 1579config CPU_MIPS32_3_5_FEATURES
1540 bool "MIPS32 Release 3.5 Features" 1580 bool "MIPS32 Release 3.5 Features"
1541 depends on SYS_HAS_CPU_MIPS32_R3_5 1581 depends on SYS_HAS_CPU_MIPS32_R3_5
1542 depends on CPU_MIPS32_R2 1582 depends on CPU_MIPS32_R2 || CPU_MIPS32_R6
1543 help 1583 help
1544 Choose this option to build a kernel for release 2 or later of the 1584 Choose this option to build a kernel for release 2 or later of the
1545 MIPS32 architecture including features from the 3.5 release such as 1585 MIPS32 architecture including features from the 3.5 release such as
@@ -1659,12 +1699,18 @@ config SYS_HAS_CPU_MIPS32_R2
1659config SYS_HAS_CPU_MIPS32_R3_5 1699config SYS_HAS_CPU_MIPS32_R3_5
1660 bool 1700 bool
1661 1701
1702config SYS_HAS_CPU_MIPS32_R6
1703 bool
1704
1662config SYS_HAS_CPU_MIPS64_R1 1705config SYS_HAS_CPU_MIPS64_R1
1663 bool 1706 bool
1664 1707
1665config SYS_HAS_CPU_MIPS64_R2 1708config SYS_HAS_CPU_MIPS64_R2
1666 bool 1709 bool
1667 1710
1711config SYS_HAS_CPU_MIPS64_R6
1712 bool
1713
1668config SYS_HAS_CPU_R3000 1714config SYS_HAS_CPU_R3000
1669 bool 1715 bool
1670 1716
@@ -1764,11 +1810,11 @@ endmenu
1764# 1810#
1765config CPU_MIPS32 1811config CPU_MIPS32
1766 bool 1812 bool
1767 default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 1813 default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 || CPU_MIPS32_R6
1768 1814
1769config CPU_MIPS64 1815config CPU_MIPS64
1770 bool 1816 bool
1771 default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 1817 default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R6
1772 1818
1773# 1819#
1774# These two indicate the revision of the architecture, either Release 1 or Release 2 1820# These two indicate the revision of the architecture, either Release 1 or Release 2
@@ -1780,6 +1826,12 @@ config CPU_MIPSR1
1780config CPU_MIPSR2 1826config CPU_MIPSR2
1781 bool 1827 bool
1782 default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON 1828 default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
1829 select MIPS_SPRAM
1830
1831config CPU_MIPSR6
1832 bool
1833 default y if CPU_MIPS32_R6 || CPU_MIPS64_R6
1834 select MIPS_SPRAM
1783 1835
1784config EVA 1836config EVA
1785 bool 1837 bool
@@ -2013,6 +2065,19 @@ config MIPS_MT_FPAFF
2013 default y 2065 default y
2014 depends on MIPS_MT_SMP 2066 depends on MIPS_MT_SMP
2015 2067
2068config MIPSR2_TO_R6_EMULATOR
2069 bool "MIPS R2-to-R6 emulator"
2070 depends on CPU_MIPSR6 && !SMP
2071 default y
2072 help
2073 Choose this option if you want to run non-R6 MIPS userland code.
2074 Even if you say 'Y' here, the emulator will still be disabled by
2075 default. You can enable it using the 'mipsr2emul' kernel option.
2076 The only reason this is a build-time option is to save ~14K from the
2077 final kernel image.
2078comment "MIPS R2-to-R6 emulator is only available for UP kernels"
2079 depends on SMP && CPU_MIPSR6
2080
2016config MIPS_VPE_LOADER 2081config MIPS_VPE_LOADER
2017 bool "VPE loader support." 2082 bool "VPE loader support."
2018 depends on SYS_SUPPORTS_MULTITHREADING && MODULES 2083 depends on SYS_SUPPORTS_MULTITHREADING && MODULES
@@ -2148,7 +2213,7 @@ config CPU_HAS_SMARTMIPS
2148 here. 2213 here.
2149 2214
2150config CPU_MICROMIPS 2215config CPU_MICROMIPS
2151 depends on 32BIT && SYS_SUPPORTS_MICROMIPS 2216 depends on 32BIT && SYS_SUPPORTS_MICROMIPS && !CPU_MIPSR6
2152 bool "microMIPS" 2217 bool "microMIPS"
2153 help 2218 help
2154 When this option is enabled the kernel will be built using the 2219 When this option is enabled the kernel will be built using the
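Kernel code that must branch on the new R6 symbols can test them at compile time with IS_ENABLED(), which evaluates to 1 for built-in options and 0 otherwise. An illustrative fragment, not part of this patch set:

    #include <linux/kconfig.h>

    static inline bool sketch_built_for_r6(void)
    {
            /* CPU_MIPSR6 is selected by both CPU_MIPS32_R6 and CPU_MIPS64_R6 */
            return IS_ENABLED(CONFIG_CPU_MIPSR6);
    }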
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 88a9f433f6fc..3a2b775e8458 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -122,17 +122,4 @@ config SPINLOCK_TEST
122 help 122 help
123 Add several files to the debugfs to test spinlock speed. 123 Add several files to the debugfs to test spinlock speed.
124 124
125config FP32XX_HYBRID_FPRS
126 bool "Run FP32 & FPXX code with hybrid FPRs"
127 depends on MIPS_O32_FP64_SUPPORT
128 help
129 The hybrid FPR scheme is normally used only when a program needs to
130 execute a mix of FP32 & FP64A code, since the trapping & emulation
131 that it entails is expensive. When enabled, this option will lead
132 to the kernel running programs which use the FP32 & FPXX FP ABIs
133 using the hybrid FPR scheme, which can be useful for debugging
134 purposes.
135
136 If unsure, say N.
137
138endmenu 125endmenu
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 2563a088d3b8..8f57fc72d62c 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -122,26 +122,8 @@ predef-le += -DMIPSEL -D_MIPSEL -D__MIPSEL -D__MIPSEL__
122cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be)) 122cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be))
123cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le)) 123cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
124 124
125# For smartmips configurations, there are hundreds of warnings due to ISA overrides
126# in assembly and header files. smartmips is only supported for MIPS32r1 onwards
127# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
128# similar directives in the kernel will spam the build logs with the following warnings:
129# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater
130# or
131# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
132# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
133# been fixed properly.
134cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,-msmartmips) -Wa,--no-warn
135cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips)
136
137cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \ 125cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \
138 -fno-omit-frame-pointer 126 -fno-omit-frame-pointer
139
140ifeq ($(CONFIG_CPU_HAS_MSA),y)
141toolchain-msa := $(call cc-option-yn,-mhard-float -mfp64 -Wa$(comma)-mmsa)
142cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA
143endif
144
145# 127#
146# CPU-dependent compiler/assembler options for optimization. 128# CPU-dependent compiler/assembler options for optimization.
147# 129#
@@ -156,10 +138,12 @@ cflags-$(CONFIG_CPU_MIPS32_R1) += $(call cc-option,-march=mips32,-mips32 -U_MIPS
156 -Wa,-mips32 -Wa,--trap 138 -Wa,-mips32 -Wa,--trap
157cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \ 139cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
158 -Wa,-mips32r2 -Wa,--trap 140 -Wa,-mips32r2 -Wa,--trap
141cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap
159cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \ 142cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
160 -Wa,-mips64 -Wa,--trap 143 -Wa,-mips64 -Wa,--trap
161cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \ 144cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
162 -Wa,-mips64r2 -Wa,--trap 145 -Wa,-mips64r2 -Wa,--trap
146cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap
163cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap 147cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap
164cflags-$(CONFIG_CPU_R5432) += $(call cc-option,-march=r5400,-march=r5000) \ 148cflags-$(CONFIG_CPU_R5432) += $(call cc-option,-march=r5400,-march=r5000) \
165 -Wa,--trap 149 -Wa,--trap
@@ -182,6 +166,16 @@ cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += -Wa,-march=octeon
182endif 166endif
183cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1 167cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1
184cflags-$(CONFIG_CPU_BMIPS) += -march=mips32 -Wa,-mips32 -Wa,--trap 168cflags-$(CONFIG_CPU_BMIPS) += -march=mips32 -Wa,-mips32 -Wa,--trap
169#
170# binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
171# as MIPS64 R2; older versions as just R1. This leaves the possibility open
172# that GCC might generate R2 code for -march=loongson3a which then is rejected
173# by GAS. The cc-option can't probe for this behaviour so -march=loongson3a
174# can't easily be used safely within the kbuild framework.
175#
176cflags-$(CONFIG_CPU_LOONGSON3) += \
177 $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
178 -Wa,-mips64r2 -Wa,--trap
185 179
186cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,) 180cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,)
187cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,) 181cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,)
@@ -194,6 +188,23 @@ KBUILD_CFLAGS_MODULE += -msb1-pass1-workarounds
194endif 188endif
195endif 189endif
196 190
191# For smartmips configurations, there are hundreds of warnings due to ISA overrides
192# in assembly and header files. smartmips is only supported for MIPS32r1 onwards
193# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
194# similar directives in the kernel will spam the build logs with the following warnings:
195# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater
196# or
197# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
198# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
199# been fixed properly.
200mips-cflags := "$(cflags-y)"
201cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,$(mips-cflags),-msmartmips) -Wa,--no-warn
202cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,$(mips-cflags),-mmicromips)
203ifeq ($(CONFIG_CPU_HAS_MSA),y)
204toolchain-msa := $(call cc-option-yn,-$(mips-cflags),mhard-float -mfp64 -Wa$(comma)-mmsa)
205cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA
206endif
207
197# 208#
198# Firmware support 209# Firmware support
199# 210#
@@ -287,7 +298,11 @@ boot-y += vmlinux.ecoff
287boot-y += vmlinux.srec 298boot-y += vmlinux.srec
288ifeq ($(shell expr $(load-y) \< 0xffffffff80000000 2> /dev/null), 0) 299ifeq ($(shell expr $(load-y) \< 0xffffffff80000000 2> /dev/null), 0)
289boot-y += uImage 300boot-y += uImage
301boot-y += uImage.bin
302boot-y += uImage.bz2
290boot-y += uImage.gz 303boot-y += uImage.gz
304boot-y += uImage.lzma
305boot-y += uImage.lzo
291endif 306endif
292 307
293# compressed boot image targets (arch/mips/boot/compressed/) 308# compressed boot image targets (arch/mips/boot/compressed/)
@@ -386,7 +401,11 @@ define archhelp
386 echo ' vmlinuz.bin - Raw binary zboot image' 401 echo ' vmlinuz.bin - Raw binary zboot image'
387 echo ' vmlinuz.srec - SREC zboot image' 402 echo ' vmlinuz.srec - SREC zboot image'
388 echo ' uImage - U-Boot image' 403 echo ' uImage - U-Boot image'
404 echo ' uImage.bin - U-Boot image (uncompressed)'
405 echo ' uImage.bz2 - U-Boot image (bz2)'
389 echo ' uImage.gz - U-Boot image (gzip)' 406 echo ' uImage.gz - U-Boot image (gzip)'
407 echo ' uImage.lzma - U-Boot image (lzma)'
408 echo ' uImage.lzo - U-Boot image (lzo)'
390 echo ' dtbs - Device-tree blobs for enabled boards' 409 echo ' dtbs - Device-tree blobs for enabled boards'
391 echo 410 echo
392 echo ' These will be default as appropriate for a configured platform.' 411 echo ' These will be default as appropriate for a configured platform.'
diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c
index 48a9dfc55b51..6a98d2cb402c 100644
--- a/arch/mips/alchemy/common/clock.c
+++ b/arch/mips/alchemy/common/clock.c
@@ -127,12 +127,20 @@ static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw,
127 t = 396000000; 127 t = 396000000;
128 else { 128 else {
129 t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f; 129 t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
130 if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300)
131 t &= 0x3f;
130 t *= parent_rate; 132 t *= parent_rate;
131 } 133 }
132 134
133 return t; 135 return t;
134} 136}
135 137
138void __init alchemy_set_lpj(void)
139{
140 preset_lpj = alchemy_clk_cpu_recalc(NULL, ALCHEMY_ROOTCLK_RATE);
141 preset_lpj /= 2 * HZ;
142}
143
136static struct clk_ops alchemy_clkops_cpu = { 144static struct clk_ops alchemy_clkops_cpu = {
137 .recalc_rate = alchemy_clk_cpu_recalc, 145 .recalc_rate = alchemy_clk_cpu_recalc,
138}; 146};
@@ -315,17 +323,26 @@ static struct clk __init *alchemy_clk_setup_mem(const char *pn, int ct)
315 323
316/* lrclk: external synchronous static bus clock ***********************/ 324/* lrclk: external synchronous static bus clock ***********************/
317 325
318static struct clk __init *alchemy_clk_setup_lrclk(const char *pn) 326static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t)
319{ 327{
320 /* MEM_STCFG0[15:13] = divisor. 328 /* Au1000, Au1500: MEM_STCFG0[11]: If bit is set, lrclk=pclk/5,
329 * otherwise lrclk=pclk/4.
330 * All other variants: MEM_STCFG0[15:13] = divisor.
321 * L/RCLK = periph_clk / (divisor + 1) 331 * L/RCLK = periph_clk / (divisor + 1)
322 * On Au1000, Au1500, Au1100 it's called LCLK, 332 * On Au1000, Au1500, Au1100 it's called LCLK,
323 * on later models it's called RCLK, but it's the same thing. 333 * on later models it's called RCLK, but it's the same thing.
324 */ 334 */
325 struct clk *c; 335 struct clk *c;
326 unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0) >> 13; 336 unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0);
327 337
328 v = (v & 7) + 1; 338 switch (t) {
339 case ALCHEMY_CPU_AU1000:
340 case ALCHEMY_CPU_AU1500:
341 v = 4 + ((v >> 11) & 1);
342 break;
343 default: /* all other models */
344 v = ((v >> 13) & 7) + 1;
345 }
329 c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK, 346 c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK,
330 pn, 0, 1, v); 347 pn, 0, 1, v);
331 if (!IS_ERR(c)) 348 if (!IS_ERR(c))
@@ -546,6 +563,8 @@ static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw,
546} 563}
547 564
548static long alchemy_clk_fgv1_detr(struct clk_hw *hw, unsigned long rate, 565static long alchemy_clk_fgv1_detr(struct clk_hw *hw, unsigned long rate,
566 unsigned long min_rate,
567 unsigned long max_rate,
549 unsigned long *best_parent_rate, 568 unsigned long *best_parent_rate,
550 struct clk_hw **best_parent_clk) 569 struct clk_hw **best_parent_clk)
551{ 570{
@@ -678,6 +697,8 @@ static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw,
678} 697}
679 698
680static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate, 699static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate,
700 unsigned long min_rate,
701 unsigned long max_rate,
681 unsigned long *best_parent_rate, 702 unsigned long *best_parent_rate,
682 struct clk_hw **best_parent_clk) 703 struct clk_hw **best_parent_clk)
683{ 704{
@@ -897,6 +918,8 @@ static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate,
897} 918}
898 919
899static long alchemy_clk_csrc_detr(struct clk_hw *hw, unsigned long rate, 920static long alchemy_clk_csrc_detr(struct clk_hw *hw, unsigned long rate,
921 unsigned long min_rate,
922 unsigned long max_rate,
900 unsigned long *best_parent_rate, 923 unsigned long *best_parent_rate,
901 struct clk_hw **best_parent_clk) 924 struct clk_hw **best_parent_clk)
902{ 925{
@@ -1060,7 +1083,7 @@ static int __init alchemy_clk_init(void)
1060 ERRCK(c) 1083 ERRCK(c)
1061 1084
1062 /* L/RCLK: external static bus clock for synchronous mode */ 1085 /* L/RCLK: external static bus clock for synchronous mode */
1063 c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK); 1086 c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype);
1064 ERRCK(c) 1087 ERRCK(c)
1065 1088
1066 /* Frequency dividers 0-5 */ 1089 /* Frequency dividers 0-5 */
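The lrclk rework above boils down to one per-model divisor rule: Au1000/Au1500 derive /4 or /5 from MEM_STCFG0 bit 11, while every later model uses bits [15:13] plus one. The same logic as a standalone sketch (parameter names are illustrative):

    /* v = raw MEM_STCFG0 value; returns the L/RCLK divisor */
    static unsigned long sketch_lrclk_div(unsigned long v, int is_au1000_or_au1500)
    {
            if (is_au1000_or_au1500)
                    return 4 + ((v >> 11) & 1);     /* bit 11 set: /5, clear: /4 */
            return ((v >> 13) & 7) + 1;             /* divisor field + 1 */
    }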
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
index 4e72daf12c32..2902138b3e0f 100644
--- a/arch/mips/alchemy/common/setup.c
+++ b/arch/mips/alchemy/common/setup.c
@@ -34,10 +34,12 @@
34#include <au1000.h> 34#include <au1000.h>
35 35
36extern void __init board_setup(void); 36extern void __init board_setup(void);
37extern void set_cpuspec(void); 37extern void __init alchemy_set_lpj(void);
38 38
39void __init plat_mem_setup(void) 39void __init plat_mem_setup(void)
40{ 40{
41 alchemy_set_lpj();
42
41 if (au1xxx_cpu_needs_config_od()) 43 if (au1xxx_cpu_needs_config_od())
42 /* Various early Au1xx0 errata corrected by this */ 44 /* Various early Au1xx0 errata corrected by this */
43 set_c0_config(1 << 19); /* Set Config[OD] */ 45 set_c0_config(1 << 19); /* Set Config[OD] */
diff --git a/arch/mips/bcm3384/irq.c b/arch/mips/bcm3384/irq.c
index 0fb5134fb832..fd94fe849af6 100644
--- a/arch/mips/bcm3384/irq.c
+++ b/arch/mips/bcm3384/irq.c
@@ -180,7 +180,7 @@ static int __init intc_of_init(struct device_node *node,
180 180
181static struct of_device_id of_irq_ids[] __initdata = { 181static struct of_device_id of_irq_ids[] __initdata = {
182 { .compatible = "mti,cpu-interrupt-controller", 182 { .compatible = "mti,cpu-interrupt-controller",
183 .data = mips_cpu_intc_init }, 183 .data = mips_cpu_irq_of_init },
184 { .compatible = "brcm,bcm3384-intc", 184 { .compatible = "brcm,bcm3384-intc",
185 .data = intc_of_init }, 185 .data = intc_of_init },
186 {}, 186 {},
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index 1466c0026093..acb1988f354e 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -23,6 +23,12 @@ strip-flags := $(addprefix --remove-section=,$(drop-sections))
23 23
24hostprogs-y := elf2ecoff 24hostprogs-y := elf2ecoff
25 25
26suffix-y := bin
27suffix-$(CONFIG_KERNEL_BZIP2) := bz2
28suffix-$(CONFIG_KERNEL_GZIP) := gz
29suffix-$(CONFIG_KERNEL_LZMA) := lzma
30suffix-$(CONFIG_KERNEL_LZO) := lzo
31
26targets := vmlinux.ecoff 32targets := vmlinux.ecoff
27quiet_cmd_ecoff = ECOFF $@ 33quiet_cmd_ecoff = ECOFF $@
28 cmd_ecoff = $(obj)/elf2ecoff $(VMLINUX) $@ $(e2eflag) 34 cmd_ecoff = $(obj)/elf2ecoff $(VMLINUX) $@ $(e2eflag)
@@ -44,14 +50,53 @@ $(obj)/vmlinux.srec: $(VMLINUX) FORCE
44UIMAGE_LOADADDR = $(VMLINUX_LOAD_ADDRESS) 50UIMAGE_LOADADDR = $(VMLINUX_LOAD_ADDRESS)
45UIMAGE_ENTRYADDR = $(VMLINUX_ENTRY_ADDRESS) 51UIMAGE_ENTRYADDR = $(VMLINUX_ENTRY_ADDRESS)
46 52
53#
54# Compressed vmlinux images
55#
56
57extra-y += vmlinux.bin.bz2
58extra-y += vmlinux.bin.gz
59extra-y += vmlinux.bin.lzma
60extra-y += vmlinux.bin.lzo
61
62$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
63 $(call if_changed,bzip2)
64
47$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE 65$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
48 $(call if_changed,gzip) 66 $(call if_changed,gzip)
49 67
68$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
69 $(call if_changed,lzma)
70
71$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
72 $(call if_changed,lzo)
73
74#
75# Compressed u-boot images
76#
77
78targets += uImage
79targets += uImage.bin
80targets += uImage.bz2
50targets += uImage.gz 81targets += uImage.gz
82targets += uImage.lzma
83targets += uImage.lzo
84
85$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
86 $(call if_changed,uimage,none)
87
88$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
89 $(call if_changed,uimage,bzip2)
90
51$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE 91$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
52 $(call if_changed,uimage,gzip) 92 $(call if_changed,uimage,gzip)
53 93
54targets += uImage 94$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
55$(obj)/uImage: $(obj)/uImage.gz FORCE 95 $(call if_changed,uimage,lzma)
96
97$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
98 $(call if_changed,uimage,lzo)
99
100$(obj)/uImage: $(obj)/uImage.$(suffix-y)
56 @ln -sf $(notdir $<) $@ 101 @ln -sf $(notdir $<) $@
57 @echo ' Image $@ is ready' 102 @echo ' Image $@ is ready'
diff --git a/arch/mips/boot/elf2ecoff.c b/arch/mips/boot/elf2ecoff.c
index 2a4c52e27f41..266c8137e859 100644
--- a/arch/mips/boot/elf2ecoff.c
+++ b/arch/mips/boot/elf2ecoff.c
@@ -268,7 +268,6 @@ int main(int argc, char *argv[])
268 Elf32_Ehdr ex; 268 Elf32_Ehdr ex;
269 Elf32_Phdr *ph; 269 Elf32_Phdr *ph;
270 Elf32_Shdr *sh; 270 Elf32_Shdr *sh;
271 char *shstrtab;
272 int i, pad; 271 int i, pad;
273 struct sect text, data, bss; 272 struct sect text, data, bss;
274 struct filehdr efh; 273 struct filehdr efh;
@@ -336,9 +335,6 @@ int main(int argc, char *argv[])
336 "sh"); 335 "sh");
337 if (must_convert_endian) 336 if (must_convert_endian)
338 convert_elf_shdrs(sh, ex.e_shnum); 337 convert_elf_shdrs(sh, ex.e_shnum);
339 /* Read in the section string table. */
340 shstrtab = saveRead(infile, sh[ex.e_shstrndx].sh_offset,
341 sh[ex.e_shstrndx].sh_size, "shstrtab");
342 338
343 /* Figure out if we can cram the program header into an ECOFF 339 /* Figure out if we can cram the program header into an ECOFF
344 header... Basically, we can't handle anything but loadable 340 header... Basically, we can't handle anything but loadable
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c
index b752c4ed0b79..1882e6475dd0 100644
--- a/arch/mips/cavium-octeon/csrc-octeon.c
+++ b/arch/mips/cavium-octeon/csrc-octeon.c
@@ -18,7 +18,7 @@
 #include <asm/octeon/octeon.h>
 #include <asm/octeon/cvmx-ipd-defs.h>
 #include <asm/octeon/cvmx-mio-defs.h>
-
+#include <asm/octeon/cvmx-rst-defs.h>
 
 static u64 f;
 static u64 rdiv;
@@ -39,11 +39,20 @@ void __init octeon_setup_delays(void)
 
 	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
 		union cvmx_mio_rst_boot rst_boot;
+
 		rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
 		rdiv = rst_boot.s.c_mul;	/* CPU clock */
 		sdiv = rst_boot.s.pnr_mul;	/* I/O clock */
 		f = (0x8000000000000000ull / sdiv) * 2;
+	} else if (current_cpu_type() == CPU_CAVIUM_OCTEON3) {
+		union cvmx_rst_boot rst_boot;
+
+		rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
+		rdiv = rst_boot.s.c_mul;	/* CPU clock */
+		sdiv = rst_boot.s.pnr_mul;	/* I/O clock */
+		f = (0x8000000000000000ull / sdiv) * 2;
 	}
+
 }
 
 /*
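
Editor's note on the constant above: `f = (0x8000000000000000ull / sdiv) * 2` is a 2^64-scaled reciprocal of the I/O clock multiplier, computed as (2^63 / sdiv) * 2 so the constant itself still fits in 64 bits. Later conversions can then divide by sdiv with a multiply and a shift. A minimal sketch, assuming the GCC/Clang `__int128` extension; the helper name is hypothetical, not kernel code:

#include <stdint.h>

/* Approximate x / sdiv using the precomputed reciprocal trick above. */
static uint64_t div_by_sdiv(uint64_t x, uint64_t sdiv)
{
	uint64_t f = (0x8000000000000000ull / sdiv) * 2;	/* ~2^64 / sdiv */

	return (uint64_t)(((unsigned __int128)x * f) >> 64);	/* ~x / sdiv */
}
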
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 3778655c4a37..7d8987818ccf 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -276,7 +276,7 @@ void __init plat_swiotlb_setup(void)
 			continue;
 
 		/* These addresses map low for PCI. */
-		if (e->addr > 0x410000000ull && !OCTEON_IS_MODEL(OCTEON_CN6XXX))
+		if (e->addr > 0x410000000ull && !OCTEON_IS_OCTEON2())
 			continue;
 
 		addr_size += e->size;
@@ -308,7 +308,7 @@ void __init plat_swiotlb_setup(void)
 #endif
 #ifdef CONFIG_USB_OCTEON_OHCI
 	/* OCTEON II ohci is only 32-bit. */
-	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && max_addr >= 0x100000000ul)
+	if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
 		swiotlbsize = 64 * (1<<20);
 #endif
 	swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
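
Editor's note: the 64 MiB cap for the 32-bit OHCI controller feeds straight into the slab count on the last line. A minimal sketch of the arithmetic, assuming IO_TLB_SHIFT is 11 (2 KiB slabs), its value in kernels of this vintage:

#include <stdio.h>

int main(void)
{
	unsigned long swiotlbsize = 64 * (1 << 20);	/* 64 MiB OHCI cap */
	unsigned long nslabs = swiotlbsize >> 11;	/* assumed IO_TLB_SHIFT */

	printf("%lu slabs\n", nslabs);			/* prints 32768 */
	return 0;
}
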
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
index 5dfef84b9576..9eb0feef4417 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
@@ -767,7 +767,7 @@ enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(vo
 		break;
 	}
 	/* Most boards except NIC10e use a 12MHz crystal */
-	if (OCTEON_IS_MODEL(OCTEON_FAM_2))
+	if (OCTEON_IS_OCTEON2())
 		return USB_CLOCK_TYPE_CRYSTAL_12;
 	return USB_CLOCK_TYPE_REF_48;
 }
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 2bc4aa95944e..10f762557b92 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -3,12 +3,14 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2004-2012 Cavium, Inc.
+ * Copyright (C) 2004-2014 Cavium, Inc.
  */
 
+#include <linux/of_address.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/bitops.h>
+#include <linux/of_irq.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <linux/irq.h>
@@ -22,16 +24,25 @@ static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
 static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
 static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
 
+struct octeon_irq_ciu_domain_data {
+	int num_sum;	/* number of sum registers (2 or 3). */
+};
+
 static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];
 
-union octeon_ciu_chip_data {
-	void *p;
-	unsigned long l;
-	struct {
-		unsigned long line:6;
-		unsigned long bit:6;
-		unsigned long gpio_line:6;
-	} s;
+struct octeon_ciu_chip_data {
+	union {
+		struct {		/* only used for ciu3 */
+			u64 ciu3_addr;
+			unsigned int intsn;
+		};
+		struct {		/* only used for ciu/ciu2 */
+			u8 line;
+			u8 bit;
+			u8 gpio_line;
+		};
+	};
+	int current_cpu;	/* Next CPU expected to take this irq */
 };
 
 struct octeon_core_chip_data {
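
Editor's note on the trade-off above: the old union packed line/bit/gpio_line bitfields into the pointer-sized chip_data value itself, so the per-irq state could never outgrow sizeof(void *). Heap-allocating a real struct, as this hunk does, makes room for current_cpu and the ciu3 members. A minimal sketch of the new pattern; `my_cd` and `my_attach_chip_data` are illustrative names, not the kernel's:

#include <linux/irq.h>
#include <linux/slab.h>

struct my_cd {
	u8 line;
	u8 bit;
	u8 gpio_line;
	int current_cpu;
};

static int my_attach_chip_data(unsigned int irq, u8 line, u8 bit)
{
	struct my_cd *cd = kzalloc(sizeof(*cd), GFP_KERNEL);

	if (!cd)
		return -ENOMEM;
	cd->line = line;
	cd->bit = bit;
	irq_set_chip_data(irq, cd);	/* must be freed again on unmap */
	return 0;
}
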
@@ -45,27 +56,40 @@ struct octeon_core_chip_data {
 
 static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];
 
-static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
+static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
 				       struct irq_chip *chip,
 				       irq_flow_handler_t handler)
 {
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
+
+	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+	if (!cd)
+		return -ENOMEM;
 
 	irq_set_chip_and_handler(irq, chip, handler);
 
-	cd.l = 0;
-	cd.s.line = line;
-	cd.s.bit = bit;
-	cd.s.gpio_line = gpio_line;
+	cd->line = line;
+	cd->bit = bit;
+	cd->gpio_line = gpio_line;
 
-	irq_set_chip_data(irq, cd.p);
+	irq_set_chip_data(irq, cd);
 	octeon_irq_ciu_to_irq[line][bit] = irq;
+	return 0;
 }
 
-static void octeon_irq_force_ciu_mapping(struct irq_domain *domain,
-					 int irq, int line, int bit)
+static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
 {
-	irq_domain_associate(domain, irq, line << 6 | bit);
+	struct irq_data *data = irq_get_irq_data(irq);
+	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
+
+	irq_set_chip_data(irq, NULL);
+	kfree(cd);
+}
+
+static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
+					int irq, int line, int bit)
+{
+	return irq_domain_associate(domain, irq, line << 6 | bit);
 }
 
 static int octeon_coreid_for_cpu(int cpu)
@@ -202,9 +226,10 @@ static int next_cpu_for_irq(struct irq_data *data)
 #ifdef CONFIG_SMP
 	int cpu;
 	int weight = cpumask_weight(data->affinity);
+	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
 
 	if (weight > 1) {
-		cpu = smp_processor_id();
+		cpu = cd->current_cpu;
 		for (;;) {
 			cpu = cpumask_next(cpu, data->affinity);
 			if (cpu >= nr_cpu_ids) {
@@ -219,6 +244,7 @@ static int next_cpu_for_irq(struct irq_data *data)
 	} else {
 		cpu = smp_processor_id();
 	}
+	cd->current_cpu = cpu;
 	return cpu;
 #else
 	return smp_processor_id();
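
Editor's note on the rotation above: resuming the cpumask walk at cd->current_cpu instead of smp_processor_id() spreads an irq whose affinity mask names several CPUs across all of them over time. A simplified sketch of the wrap-around, with the real loop's restart behaviour modelled by cpumask_first(); the helper is hypothetical:

#include <linux/cpumask.h>

static int my_next_cpu(int last_cpu, const struct cpumask *allowed)
{
	int cpu = cpumask_next(last_cpu, allowed);

	if (cpu >= nr_cpu_ids)			/* walked off the end */
		cpu = cpumask_first(allowed);	/* wrap around */
	return cpu;
}
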
@@ -231,15 +257,15 @@ static void octeon_irq_ciu_enable(struct irq_data *data)
 	int coreid = octeon_coreid_for_cpu(cpu);
 	unsigned long *pen;
 	unsigned long flags;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
 
-	cd.p = irq_data_get_irq_chip_data(data);
+	cd = irq_data_get_irq_chip_data(data);
 
 	raw_spin_lock_irqsave(lock, flags);
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
-		__set_bit(cd.s.bit, pen);
+		__set_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
@@ -248,7 +274,7 @@ static void octeon_irq_ciu_enable(struct irq_data *data)
 		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
 	} else {
 		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
-		__set_bit(cd.s.bit, pen);
+		__set_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
@@ -263,15 +289,15 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data)
 {
 	unsigned long *pen;
 	unsigned long flags;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
 
-	cd.p = irq_data_get_irq_chip_data(data);
+	cd = irq_data_get_irq_chip_data(data);
 
 	raw_spin_lock_irqsave(lock, flags);
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
-		__set_bit(cd.s.bit, pen);
+		__set_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
@@ -280,7 +306,7 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data)
 		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
 	} else {
 		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
-		__set_bit(cd.s.bit, pen);
+		__set_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
@@ -295,15 +321,15 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data)
 {
 	unsigned long *pen;
 	unsigned long flags;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
 
-	cd.p = irq_data_get_irq_chip_data(data);
+	cd = irq_data_get_irq_chip_data(data);
 
 	raw_spin_lock_irqsave(lock, flags);
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
-		__clear_bit(cd.s.bit, pen);
+		__clear_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
@@ -312,7 +338,7 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data)
 		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
 	} else {
 		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
-		__clear_bit(cd.s.bit, pen);
+		__clear_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
@@ -328,27 +354,27 @@ static void octeon_irq_ciu_disable_all(struct irq_data *data)
 	unsigned long flags;
 	unsigned long *pen;
 	int cpu;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	raw_spinlock_t *lock;
 
-	cd.p = irq_data_get_irq_chip_data(data);
+	cd = irq_data_get_irq_chip_data(data);
 
 	for_each_online_cpu(cpu) {
 		int coreid = octeon_coreid_for_cpu(cpu);
 		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
-		if (cd.s.line == 0)
+		if (cd->line == 0)
 			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
 		else
 			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
 
 		raw_spin_lock_irqsave(lock, flags);
-		__clear_bit(cd.s.bit, pen);
+		__clear_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
 		 */
 		wmb();
-		if (cd.s.line == 0)
+		if (cd->line == 0)
 			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
 		else
 			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
@@ -361,27 +387,27 @@ static void octeon_irq_ciu_enable_all(struct irq_data *data)
 	unsigned long flags;
 	unsigned long *pen;
 	int cpu;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	raw_spinlock_t *lock;
 
-	cd.p = irq_data_get_irq_chip_data(data);
+	cd = irq_data_get_irq_chip_data(data);
 
 	for_each_online_cpu(cpu) {
 		int coreid = octeon_coreid_for_cpu(cpu);
 		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
-		if (cd.s.line == 0)
+		if (cd->line == 0)
 			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
 		else
 			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
 
 		raw_spin_lock_irqsave(lock, flags);
-		__set_bit(cd.s.bit, pen);
+		__set_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
 		 */
 		wmb();
-		if (cd.s.line == 0)
+		if (cd->line == 0)
 			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
 		else
 			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
@@ -397,45 +423,106 @@ static void octeon_irq_ciu_enable_v2(struct irq_data *data)
 {
 	u64 mask;
 	int cpu = next_cpu_for_irq(data);
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
 	/*
 	 * Called under the desc lock, so these should never get out
 	 * of sync.
 	 */
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		int index = octeon_coreid_for_cpu(cpu) * 2;
-		set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+		set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
 		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
 	} else {
 		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
-		set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+		set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
 		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
 	}
 }
 
 /*
+ * Enable the irq in the sum2 registers.
+ */
+static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
+{
+	u64 mask;
+	int cpu = next_cpu_for_irq(data);
+	int index = octeon_coreid_for_cpu(cpu);
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
+}
+
+/*
+ * Disable the irq in the sum2 registers.
+ */
+static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
+{
+	u64 mask;
+	int cpu = next_cpu_for_irq(data);
+	int index = octeon_coreid_for_cpu(cpu);
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
+}
+
+static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
+{
+	u64 mask;
+	int cpu = next_cpu_for_irq(data);
+	int index = octeon_coreid_for_cpu(cpu);
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
+}
+
+static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
+{
+	int cpu;
+	struct octeon_ciu_chip_data *cd;
+	u64 mask;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	for_each_online_cpu(cpu) {
+		int coreid = octeon_coreid_for_cpu(cpu);
+
+		cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
+	}
+}
+
+/*
  * Enable the irq on the current CPU for chips that
  * have the EN*_W1{S,C} registers.
  */
 static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
 {
 	u64 mask;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		int index = cvmx_get_core_num() * 2;
-		set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
+		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
 		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
 	} else {
 		int index = cvmx_get_core_num() * 2 + 1;
-		set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
+		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
 		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
 	}
 }
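
Editor's note on the register convention the sum2 helpers rely on: a W1S ("write 1 to set") address ors the written mask into the enable state, and its W1C twin clears it, so a single bit can be enabled or disabled with one store, with no read-modify-write and none of the per-CPU mirror/spinlock bookkeeping the plain CVMX_CIU_INTX_EN* path above needs. A minimal sketch; register names are from the patch, the helpers are illustrative:

static void my_enable_bit(u64 w1s_addr, unsigned int bit)
{
	cvmx_write_csr(w1s_addr, 1ull << bit);	/* sets only this bit */
}

static void my_disable_bit(u64 w1c_addr, unsigned int bit)
{
	cvmx_write_csr(w1c_addr, 1ull << bit);	/* clears only this bit */
}
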
@@ -443,18 +530,18 @@ static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
 static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
 {
 	u64 mask;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		int index = cvmx_get_core_num() * 2;
-		clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
+		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
 		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
 	} else {
 		int index = cvmx_get_core_num() * 2 + 1;
-		clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
+		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
 		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
 	}
 }
@@ -465,12 +552,12 @@ static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
 static void octeon_irq_ciu_ack(struct irq_data *data)
 {
 	u64 mask;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		int index = cvmx_get_core_num() * 2;
 		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
 	} else {
@@ -486,21 +573,23 @@ static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
 {
 	int cpu;
 	u64 mask;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		for_each_online_cpu(cpu) {
 			int index = octeon_coreid_for_cpu(cpu) * 2;
-			clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+			clear_bit(cd->bit,
+				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
 			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
 		}
 	} else {
 		for_each_online_cpu(cpu) {
 			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
-			clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+			clear_bit(cd->bit,
+				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
 			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
 		}
 	}
@@ -514,21 +603,23 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
 {
 	int cpu;
 	u64 mask;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		for_each_online_cpu(cpu) {
 			int index = octeon_coreid_for_cpu(cpu) * 2;
-			set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+			set_bit(cd->bit,
+				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
 		}
 	} else {
 		for_each_online_cpu(cpu) {
 			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
-			set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+			set_bit(cd->bit,
+				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
 			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
 		}
 	}
@@ -537,10 +628,10 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
 static void octeon_irq_gpio_setup(struct irq_data *data)
 {
 	union cvmx_gpio_bit_cfgx cfg;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	u32 t = irqd_get_trigger_type(data);
 
-	cd.p = irq_data_get_irq_chip_data(data);
+	cd = irq_data_get_irq_chip_data(data);
 
 	cfg.u64 = 0;
 	cfg.s.int_en = 1;
@@ -551,7 +642,7 @@ static void octeon_irq_gpio_setup(struct irq_data *data)
 	cfg.s.fil_cnt = 7;
 	cfg.s.fil_sel = 3;
 
-	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), cfg.u64);
+	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
 }
 
 static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
@@ -576,36 +667,36 @@ static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
 
 static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
 {
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);
+	cd = irq_data_get_irq_chip_data(data);
+	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
 
 	octeon_irq_ciu_disable_all_v2(data);
 }
 
 static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
 {
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);
+	cd = irq_data_get_irq_chip_data(data);
+	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
 
 	octeon_irq_ciu_disable_all(data);
 }
 
 static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
 {
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	u64 mask;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.gpio_line);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->gpio_line);
 
 	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
 }
 
-static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc)
+static void octeon_irq_handle_trigger(unsigned int irq, struct irq_desc *desc)
 {
 	if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH)
 		handle_edge_irq(irq, desc);
@@ -644,11 +735,11 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
 	int cpu;
 	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
 	unsigned long flags;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	unsigned long *pen;
 	raw_spinlock_t *lock;
 
-	cd.p = irq_data_get_irq_chip_data(data);
+	cd = irq_data_get_irq_chip_data(data);
 
 	/*
 	 * For non-v2 CIU, we will allow only single CPU affinity.
@@ -668,16 +759,16 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
 		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
 		raw_spin_lock_irqsave(lock, flags);
 
-		if (cd.s.line == 0)
+		if (cd->line == 0)
 			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
 		else
 			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
 
 		if (cpumask_test_cpu(cpu, dest) && enable_one) {
 			enable_one = 0;
-			__set_bit(cd.s.bit, pen);
+			__set_bit(cd->bit, pen);
 		} else {
-			__clear_bit(cd.s.bit, pen);
+			__clear_bit(cd->bit, pen);
 		}
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
@@ -685,7 +776,7 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
 		 */
 		wmb();
 
-		if (cd.s.line == 0)
+		if (cd->line == 0)
 			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
 		else
 			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
@@ -706,24 +797,24 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
 	int cpu;
 	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
 	u64 mask;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
 	if (!enable_one)
 		return 0;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << cd.s.bit;
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << cd->bit;
 
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		for_each_online_cpu(cpu) {
 			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
 			int index = octeon_coreid_for_cpu(cpu) * 2;
 			if (cpumask_test_cpu(cpu, dest) && enable_one) {
 				enable_one = false;
-				set_bit(cd.s.bit, pen);
+				set_bit(cd->bit, pen);
 				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
 			} else {
-				clear_bit(cd.s.bit, pen);
+				clear_bit(cd->bit, pen);
 				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
 			}
 		}
@@ -733,16 +824,44 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
 			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
 			if (cpumask_test_cpu(cpu, dest) && enable_one) {
 				enable_one = false;
-				set_bit(cd.s.bit, pen);
+				set_bit(cd->bit, pen);
 				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
 			} else {
-				clear_bit(cd.s.bit, pen);
+				clear_bit(cd->bit, pen);
 				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
 			}
 		}
 	}
 	return 0;
 }
+
+static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
+					    const struct cpumask *dest,
+					    bool force)
+{
+	int cpu;
+	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
+	u64 mask;
+	struct octeon_ciu_chip_data *cd;
+
+	if (!enable_one)
+		return 0;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << cd->bit;
+
+	for_each_online_cpu(cpu) {
+		int index = octeon_coreid_for_cpu(cpu);
+
+		if (cpumask_test_cpu(cpu, dest) && enable_one) {
+			enable_one = false;
+			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
+		} else {
+			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
+		}
+	}
+	return 0;
+}
 #endif
 
 /*
@@ -752,6 +871,18 @@ static struct irq_chip octeon_irq_chip_ciu_v2 = {
 	.name = "CIU",
 	.irq_enable = octeon_irq_ciu_enable_v2,
 	.irq_disable = octeon_irq_ciu_disable_all_v2,
+	.irq_mask = octeon_irq_ciu_disable_local_v2,
+	.irq_unmask = octeon_irq_ciu_enable_v2,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
+	.name = "CIU",
+	.irq_enable = octeon_irq_ciu_enable_v2,
+	.irq_disable = octeon_irq_ciu_disable_all_v2,
 	.irq_ack = octeon_irq_ciu_ack,
 	.irq_mask = octeon_irq_ciu_disable_local_v2,
 	.irq_unmask = octeon_irq_ciu_enable_v2,
@@ -761,10 +892,50 @@ static struct irq_chip octeon_irq_chip_ciu_v2 = {
 #endif
 };
 
+/*
+ * Newer octeon chips have support for lockless CIU operation.
+ */
+static struct irq_chip octeon_irq_chip_ciu_sum2 = {
+	.name = "CIU",
+	.irq_enable = octeon_irq_ciu_enable_sum2,
+	.irq_disable = octeon_irq_ciu_disable_all_sum2,
+	.irq_mask = octeon_irq_ciu_disable_local_sum2,
+	.irq_unmask = octeon_irq_ciu_enable_sum2,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
+	.name = "CIU",
+	.irq_enable = octeon_irq_ciu_enable_sum2,
+	.irq_disable = octeon_irq_ciu_disable_all_sum2,
+	.irq_ack = octeon_irq_ciu_ack_sum2,
+	.irq_mask = octeon_irq_ciu_disable_local_sum2,
+	.irq_unmask = octeon_irq_ciu_enable_sum2,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
 static struct irq_chip octeon_irq_chip_ciu = {
 	.name = "CIU",
 	.irq_enable = octeon_irq_ciu_enable,
 	.irq_disable = octeon_irq_ciu_disable_all,
+	.irq_mask = octeon_irq_ciu_disable_local,
+	.irq_unmask = octeon_irq_ciu_enable,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu_set_affinity,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu_edge = {
+	.name = "CIU",
+	.irq_enable = octeon_irq_ciu_enable,
+	.irq_disable = octeon_irq_ciu_disable_all,
 	.irq_ack = octeon_irq_ciu_ack,
 	.irq_mask = octeon_irq_ciu_disable_local,
 	.irq_unmask = octeon_irq_ciu_enable,
@@ -970,11 +1141,12 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d,
 			      unsigned int *out_type)
 {
 	unsigned int ciu, bit;
+	struct octeon_irq_ciu_domain_data *dd = d->host_data;
 
 	ciu = intspec[0];
 	bit = intspec[1];
 
-	if (ciu > 1 || bit > 63)
+	if (ciu >= dd->num_sum || bit > 63)
 		return -EINVAL;
 
 	*out_hwirq = (ciu << 6) | bit;
@@ -984,6 +1156,7 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d,
 }
 
 static struct irq_chip *octeon_irq_ciu_chip;
+static struct irq_chip *octeon_irq_ciu_chip_edge;
 static struct irq_chip *octeon_irq_gpio_chip;
 
 static bool octeon_irq_virq_in_range(unsigned int virq)
@@ -999,8 +1172,10 @@ static bool octeon_irq_virq_in_range(unsigned int virq)
 static int octeon_irq_ciu_map(struct irq_domain *d,
 			      unsigned int virq, irq_hw_number_t hw)
 {
+	int rv;
 	unsigned int line = hw >> 6;
 	unsigned int bit = hw & 63;
+	struct octeon_irq_ciu_domain_data *dd = d->host_data;
 
 	if (!octeon_irq_virq_in_range(virq))
 		return -EINVAL;
@@ -1009,54 +1184,61 @@ static int octeon_irq_ciu_map(struct irq_domain *d,
 	if (line == 0 && bit >= 16 && bit <32)
 		return 0;
 
-	if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
+	if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
 		return -EINVAL;
 
-	if (octeon_irq_ciu_is_edge(line, bit))
-		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
-					   octeon_irq_ciu_chip,
-					   handle_edge_irq);
-	else
-		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
-					   octeon_irq_ciu_chip,
-					   handle_level_irq);
-
-	return 0;
+	if (line == 2) {
+		if (octeon_irq_ciu_is_edge(line, bit))
+			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+				&octeon_irq_chip_ciu_sum2_edge,
+				handle_edge_irq);
+		else
+			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+				&octeon_irq_chip_ciu_sum2,
+				handle_level_irq);
+	} else {
+		if (octeon_irq_ciu_is_edge(line, bit))
+			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+				octeon_irq_ciu_chip_edge,
+				handle_edge_irq);
+		else
+			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+				octeon_irq_ciu_chip,
+				handle_level_irq);
+	}
+	return rv;
 }
 
-static int octeon_irq_gpio_map_common(struct irq_domain *d,
-				      unsigned int virq, irq_hw_number_t hw,
-				      int line_limit, struct irq_chip *chip)
+static int octeon_irq_gpio_map(struct irq_domain *d,
+			       unsigned int virq, irq_hw_number_t hw)
 {
 	struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
 	unsigned int line, bit;
+	int r;
 
 	if (!octeon_irq_virq_in_range(virq))
 		return -EINVAL;
 
 	line = (hw + gpiod->base_hwirq) >> 6;
 	bit = (hw + gpiod->base_hwirq) & 63;
-	if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0)
+	if (line > ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
+		octeon_irq_ciu_to_irq[line][bit] != 0)
 		return -EINVAL;
 
-	octeon_irq_set_ciu_mapping(virq, line, bit, hw,
-				   chip, octeon_irq_handle_gpio);
-	return 0;
-}
-
-static int octeon_irq_gpio_map(struct irq_domain *d,
-			       unsigned int virq, irq_hw_number_t hw)
-{
-	return octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip);
+	r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
+		octeon_irq_gpio_chip, octeon_irq_handle_trigger);
+	return r;
 }
 
 static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
 	.map = octeon_irq_ciu_map,
+	.unmap = octeon_irq_free_cd,
 	.xlate = octeon_irq_ciu_xlat,
 };
 
 static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
 	.map = octeon_irq_gpio_map,
+	.unmap = octeon_irq_free_cd,
 	.xlate = octeon_irq_gpio_xlat,
 };
 
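
Editor's note on the pairing the new .unmap callback establishes: octeon_irq_set_ciu_mapping() now kzalloc()s chip data per virq, so the domain must free it when a mapping is disposed, or every map/unmap cycle leaks one allocation. A minimal sketch of that lifecycle; the helper is hypothetical:

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>

static void my_unmap(struct irq_domain *d, unsigned int irq)
{
	void *cd = irq_get_chip_data(irq);

	irq_set_chip_data(irq, NULL);	/* detach first */
	kfree(cd);			/* then release */
}
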
@@ -1095,6 +1277,26 @@ static void octeon_irq_ip3_ciu(void)
 	}
 }
 
+static void octeon_irq_ip4_ciu(void)
+{
+	int coreid = cvmx_get_core_num();
+	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
+	u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));
+
+	ciu_sum &= ciu_en;
+	if (likely(ciu_sum)) {
+		int bit = fls64(ciu_sum) - 1;
+		int irq = octeon_irq_ciu_to_irq[2][bit];
+
+		if (likely(irq))
+			do_IRQ(irq);
+		else
+			spurious_interrupt();
+	} else {
+		spurious_interrupt();
+	}
+}
+
 static bool octeon_irq_use_ip4;
 
 static void octeon_irq_local_enable_ip4(void *arg)
@@ -1176,7 +1378,10 @@ static void octeon_irq_setup_secondary_ciu(void)
 
 	/* Enable the CIU lines */
 	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
-	clear_c0_status(STATUSF_IP4);
+	if (octeon_irq_use_ip4)
+		set_c0_status(STATUSF_IP4);
+	else
+		clear_c0_status(STATUSF_IP4);
 }
 
 static void octeon_irq_setup_secondary_ciu2(void)
@@ -1192,95 +1397,194 @@ static void octeon_irq_setup_secondary_ciu2(void)
 	clear_c0_status(STATUSF_IP4);
 }
 
-static void __init octeon_irq_init_ciu(void)
+static int __init octeon_irq_init_ciu(
+	struct device_node *ciu_node, struct device_node *parent)
 {
-	unsigned int i;
+	unsigned int i, r;
 	struct irq_chip *chip;
+	struct irq_chip *chip_edge;
 	struct irq_chip *chip_mbox;
 	struct irq_chip *chip_wd;
-	struct device_node *gpio_node;
-	struct device_node *ciu_node;
 	struct irq_domain *ciu_domain = NULL;
+	struct octeon_irq_ciu_domain_data *dd;
+
+	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+	if (!dd)
+		return -ENOMEM;
 
 	octeon_irq_init_ciu_percpu();
 	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;
 
 	octeon_irq_ip2 = octeon_irq_ip2_ciu;
 	octeon_irq_ip3 = octeon_irq_ip3_ciu;
+	if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
+		&& !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+		octeon_irq_ip4 = octeon_irq_ip4_ciu;
+		dd->num_sum = 3;
+		octeon_irq_use_ip4 = true;
+	} else {
+		octeon_irq_ip4 = octeon_irq_ip4_mask;
+		dd->num_sum = 2;
+		octeon_irq_use_ip4 = false;
+	}
 	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
 	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
 	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
-	    OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+	    OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
 		chip = &octeon_irq_chip_ciu_v2;
+		chip_edge = &octeon_irq_chip_ciu_v2_edge;
 		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
 		chip_wd = &octeon_irq_chip_ciu_wd_v2;
 		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
 	} else {
 		chip = &octeon_irq_chip_ciu;
+		chip_edge = &octeon_irq_chip_ciu_edge;
 		chip_mbox = &octeon_irq_chip_ciu_mbox;
 		chip_wd = &octeon_irq_chip_ciu_wd;
 		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
 	}
 	octeon_irq_ciu_chip = chip;
-	octeon_irq_ip4 = octeon_irq_ip4_mask;
+	octeon_irq_ciu_chip_edge = chip_edge;
 
 	/* Mips internal */
 	octeon_irq_init_core();
 
-	gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
-	if (gpio_node) {
-		struct octeon_irq_gpio_domain_data *gpiod;
-
-		gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
-		if (gpiod) {
-			/* gpio domain host_data is the base hwirq number. */
-			gpiod->base_hwirq = 16;
-			irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
-			of_node_put(gpio_node);
-		} else
-			pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
-	} else
-		pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");
-
-	ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu");
-	if (ciu_node) {
-		ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);
-		irq_set_default_host(ciu_domain);
-		of_node_put(ciu_node);
-	} else
-		panic("Cannot find device node for cavium,octeon-3860-ciu.");
+	ciu_domain = irq_domain_add_tree(
+		ciu_node, &octeon_irq_domain_ciu_ops, dd);
+	irq_set_default_host(ciu_domain);
 
 	/* CIU_0 */
-	for (i = 0; i < 16; i++)
-		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
+	for (i = 0; i < 16; i++) {
+		r = octeon_irq_force_ciu_mapping(
+			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
+		if (r)
+			goto err;
+	}
+
+	r = octeon_irq_set_ciu_mapping(
+		OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
+	if (r)
+		goto err;
+	r = octeon_irq_set_ciu_mapping(
+		OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
+	if (r)
+		goto err;
+
+	for (i = 0; i < 4; i++) {
+		r = octeon_irq_force_ciu_mapping(
+			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
+		if (r)
+			goto err;
+	}
+	for (i = 0; i < 4; i++) {
+		r = octeon_irq_force_ciu_mapping(
+			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
+		if (r)
+			goto err;
+	}
 
-	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
-	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
+	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
+	if (r)
+		goto err;
 
-	for (i = 0; i < 4; i++)
-		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
-	for (i = 0; i < 4; i++)
-		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
+	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
+	if (r)
+		goto err;
 
-	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
-	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
-	for (i = 0; i < 4; i++)
-		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
+	for (i = 0; i < 4; i++) {
+		r = octeon_irq_force_ciu_mapping(
+			ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
+		if (r)
+			goto err;
+	}
+
+	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
+	if (r)
+		goto err;
 
-	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
-	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
+	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
+	if (r)
+		goto err;
 
 	/* CIU_1 */
-	for (i = 0; i < 16; i++)
-		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, handle_level_irq);
+	for (i = 0; i < 16; i++) {
+		r = octeon_irq_set_ciu_mapping(
+			i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
+			handle_level_irq);
+		if (r)
+			goto err;
+	}
 
-	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
+	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
+	if (r)
+		goto err;
 
 	/* Enable the CIU lines */
 	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
-	clear_c0_status(STATUSF_IP4);
+	if (octeon_irq_use_ip4)
+		set_c0_status(STATUSF_IP4);
+	else
+		clear_c0_status(STATUSF_IP4);
+
+	return 0;
+err:
+	return r;
 }
 
+static int __init octeon_irq_init_gpio(
+	struct device_node *gpio_node, struct device_node *parent)
+{
+	struct octeon_irq_gpio_domain_data *gpiod;
+	u32 interrupt_cells;
+	unsigned int base_hwirq;
+	int r;
+
+	r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
+	if (r)
+		return r;
+
+	if (interrupt_cells == 1) {
+		u32 v;
+
+		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
+		if (r) {
+			pr_warn("No \"interrupts\" property.\n");
+			return r;
+		}
+		base_hwirq = v;
+	} else if (interrupt_cells == 2) {
+		u32 v0, v1;
+
+		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
+		if (r) {
+			pr_warn("No \"interrupts\" property.\n");
+			return r;
+		}
+		r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
+		if (r) {
+			pr_warn("No \"interrupts\" property.\n");
+			return r;
+		}
+		base_hwirq = (v0 << 6) | v1;
+	} else {
+		pr_warn("Bad \"#interrupt-cells\" property: %u\n",
+			interrupt_cells);
+		return -EINVAL;
+	}
+
+	gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
+	if (gpiod) {
+		/* gpio domain host_data is the base hwirq number. */
+		gpiod->base_hwirq = base_hwirq;
+		irq_domain_add_linear(
+			gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
+	} else {
+		pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
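
Editor's note on the encoding parsed above: with a one-cell interrupt parent the gpio node's "interrupts" value is the base hwirq itself; with two cells it is (ciu, bit), folded the same way the ciu domain numbers its hwirqs: (ciu << 6) | bit. A minimal sketch of just that fold; the helper is hypothetical and the errno-style return is an assumption:

#include <errno.h>

static int my_base_hwirq(unsigned int cells, unsigned int v0, unsigned int v1)
{
	if (cells == 1)
		return v0;		/* base hwirq given directly */
	if (cells == 2)
		return (v0 << 6) | v1;	/* ciu line high, bit in low 6 */
	return -EINVAL;			/* matches the pr_warn path above */
}
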
 /*
  * Watchdog interrupts are special. They are associated with a single
  * core, so we hardwire the affinity to that core.
@@ -1290,12 +1594,13 @@ static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
1290 u64 mask; 1594 u64 mask;
1291 u64 en_addr; 1595 u64 en_addr;
1292 int coreid = data->irq - OCTEON_IRQ_WDOG0; 1596 int coreid = data->irq - OCTEON_IRQ_WDOG0;
1293 union octeon_ciu_chip_data cd; 1597 struct octeon_ciu_chip_data *cd;
1294 1598
1295 cd.p = irq_data_get_irq_chip_data(data); 1599 cd = irq_data_get_irq_chip_data(data);
1296 mask = 1ull << (cd.s.bit); 1600 mask = 1ull << (cd->bit);
1297 1601
1298 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); 1602 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
1603 (0x1000ull * cd->line);
1299 cvmx_write_csr(en_addr, mask); 1604 cvmx_write_csr(en_addr, mask);
1300 1605
1301} 1606}
@@ -1306,12 +1611,13 @@ static void octeon_irq_ciu2_enable(struct irq_data *data)
1306 u64 en_addr; 1611 u64 en_addr;
1307 int cpu = next_cpu_for_irq(data); 1612 int cpu = next_cpu_for_irq(data);
1308 int coreid = octeon_coreid_for_cpu(cpu); 1613 int coreid = octeon_coreid_for_cpu(cpu);
1309 union octeon_ciu_chip_data cd; 1614 struct octeon_ciu_chip_data *cd;
1310 1615
1311 cd.p = irq_data_get_irq_chip_data(data); 1616 cd = irq_data_get_irq_chip_data(data);
1312 mask = 1ull << (cd.s.bit); 1617 mask = 1ull << (cd->bit);
1313 1618
1314 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); 1619 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
1620 (0x1000ull * cd->line);
1315 cvmx_write_csr(en_addr, mask); 1621 cvmx_write_csr(en_addr, mask);
1316} 1622}
1317 1623
@@ -1320,12 +1626,13 @@ static void octeon_irq_ciu2_enable_local(struct irq_data *data)
1320 u64 mask; 1626 u64 mask;
1321 u64 en_addr; 1627 u64 en_addr;
1322 int coreid = cvmx_get_core_num(); 1628 int coreid = cvmx_get_core_num();
1323 union octeon_ciu_chip_data cd; 1629 struct octeon_ciu_chip_data *cd;
1324 1630
1325 cd.p = irq_data_get_irq_chip_data(data); 1631 cd = irq_data_get_irq_chip_data(data);
1326 mask = 1ull << (cd.s.bit); 1632 mask = 1ull << (cd->bit);
1327 1633
1328 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); 1634 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
1635 (0x1000ull * cd->line);
1329 cvmx_write_csr(en_addr, mask); 1636 cvmx_write_csr(en_addr, mask);
1330 1637
1331} 1638}
@@ -1335,12 +1642,13 @@ static void octeon_irq_ciu2_disable_local(struct irq_data *data)
1335 u64 mask; 1642 u64 mask;
1336 u64 en_addr; 1643 u64 en_addr;
1337 int coreid = cvmx_get_core_num(); 1644 int coreid = cvmx_get_core_num();
1338 union octeon_ciu_chip_data cd; 1645 struct octeon_ciu_chip_data *cd;
1339 1646
1340 cd.p = irq_data_get_irq_chip_data(data); 1647 cd = irq_data_get_irq_chip_data(data);
1341 mask = 1ull << (cd.s.bit); 1648 mask = 1ull << (cd->bit);
1342 1649
1343 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line); 1650 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
1651 (0x1000ull * cd->line);
1344 cvmx_write_csr(en_addr, mask); 1652 cvmx_write_csr(en_addr, mask);
1345 1653
1346} 1654}
@@ -1350,12 +1658,12 @@ static void octeon_irq_ciu2_ack(struct irq_data *data)
1350 u64 mask; 1658 u64 mask;
1351 u64 en_addr; 1659 u64 en_addr;
1352 int coreid = cvmx_get_core_num(); 1660 int coreid = cvmx_get_core_num();
1353 union octeon_ciu_chip_data cd; 1661 struct octeon_ciu_chip_data *cd;
1354 1662
1355 cd.p = irq_data_get_irq_chip_data(data); 1663 cd = irq_data_get_irq_chip_data(data);
1356 mask = 1ull << (cd.s.bit); 1664 mask = 1ull << (cd->bit);
1357 1665
1358 en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line); 1666 en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
1359 cvmx_write_csr(en_addr, mask); 1667 cvmx_write_csr(en_addr, mask);
1360 1668
1361} 1669}
@@ -1364,13 +1672,14 @@ static void octeon_irq_ciu2_disable_all(struct irq_data *data)
1364{ 1672{
1365 int cpu; 1673 int cpu;
1366 u64 mask; 1674 u64 mask;
1367 union octeon_ciu_chip_data cd; 1675 struct octeon_ciu_chip_data *cd;
1368 1676
1369 cd.p = irq_data_get_irq_chip_data(data); 1677 cd = irq_data_get_irq_chip_data(data);
1370 mask = 1ull << (cd.s.bit); 1678 mask = 1ull << (cd->bit);
1371 1679
1372 for_each_online_cpu(cpu) { 1680 for_each_online_cpu(cpu) {
1373 u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); 1681 u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
1682 octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
1374 cvmx_write_csr(en_addr, mask); 1683 cvmx_write_csr(en_addr, mask);
1375 } 1684 }
1376} 1685}
@@ -1383,7 +1692,8 @@ static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
1383 mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); 1692 mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
1384 1693
1385 for_each_online_cpu(cpu) { 1694 for_each_online_cpu(cpu) {
1386 u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu)); 1695 u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
1696 octeon_coreid_for_cpu(cpu));
1387 cvmx_write_csr(en_addr, mask); 1697 cvmx_write_csr(en_addr, mask);
1388 } 1698 }
1389} 1699}
@@ -1396,7 +1706,8 @@ static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
1396 mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); 1706 mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
1397 1707
1398 for_each_online_cpu(cpu) { 1708 for_each_online_cpu(cpu) {
1399 u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu)); 1709 u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
1710 octeon_coreid_for_cpu(cpu));
1400 cvmx_write_csr(en_addr, mask); 1711 cvmx_write_csr(en_addr, mask);
1401 } 1712 }
1402} 1713}
@@ -1430,21 +1741,25 @@ static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
1430 int cpu; 1741 int cpu;
1431 bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); 1742 bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
1432 u64 mask; 1743 u64 mask;
1433 union octeon_ciu_chip_data cd; 1744 struct octeon_ciu_chip_data *cd;
1434 1745
1435 if (!enable_one) 1746 if (!enable_one)
1436 return 0; 1747 return 0;
1437 1748
1438 cd.p = irq_data_get_irq_chip_data(data); 1749 cd = irq_data_get_irq_chip_data(data);
1439 mask = 1ull << cd.s.bit; 1750 mask = 1ull << cd->bit;
1440 1751
1441 for_each_online_cpu(cpu) { 1752 for_each_online_cpu(cpu) {
1442 u64 en_addr; 1753 u64 en_addr;
1443 if (cpumask_test_cpu(cpu, dest) && enable_one) { 1754 if (cpumask_test_cpu(cpu, dest) && enable_one) {
1444 enable_one = false; 1755 enable_one = false;
1445 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); 1756 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
1757 octeon_coreid_for_cpu(cpu)) +
1758 (0x1000ull * cd->line);
1446 } else { 1759 } else {
1447 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); 1760 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
1761 octeon_coreid_for_cpu(cpu)) +
1762 (0x1000ull * cd->line);
1448 } 1763 }
1449 cvmx_write_csr(en_addr, mask); 1764 cvmx_write_csr(en_addr, mask);
1450 } 1765 }
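
Note that the affinity path never does a read-modify-write on the enable bits: CIU2 exposes separate write-one-to-set (_W1S) and write-one-to-clear (_W1C) addresses, so enabling the line on the chosen core and disabling it everywhere else is a plain store of the mask per core, with no lock required. A hedged sketch of the idiom (register addresses hypothetical):

    /* Hypothetical W1S/W1C register pair; a store of the mask touches
     * only the bits set in it, so there is no read-modify-write race. */
    #define MY_EN_W1S(core) (0x8000ull + 0x100ull * (core))
    #define MY_EN_W1C(core) (0x9000ull + 0x100ull * (core))

    static void route_to_core(int target, int ncores, u64 mask)
    {
            int core;

            for (core = 0; core < ncores; core++) {
                    if (core == target)
                            cvmx_write_csr(MY_EN_W1S(core), mask); /* set */
                    else
                            cvmx_write_csr(MY_EN_W1C(core), mask); /* clear */
            }
    }
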
@@ -1461,10 +1776,11 @@ static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
1461 1776
1462static void octeon_irq_ciu2_disable_gpio(struct irq_data *data) 1777static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
1463{ 1778{
1464 union octeon_ciu_chip_data cd; 1779 struct octeon_ciu_chip_data *cd;
1465 cd.p = irq_data_get_irq_chip_data(data); 1780
1781 cd = irq_data_get_irq_chip_data(data);
1466 1782
1467 cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); 1783 cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
1468 1784
1469 octeon_irq_ciu2_disable_all(data); 1785 octeon_irq_ciu2_disable_all(data);
1470} 1786}
@@ -1473,6 +1789,18 @@ static struct irq_chip octeon_irq_chip_ciu2 = {
1473 .name = "CIU2-E", 1789 .name = "CIU2-E",
1474 .irq_enable = octeon_irq_ciu2_enable, 1790 .irq_enable = octeon_irq_ciu2_enable,
1475 .irq_disable = octeon_irq_ciu2_disable_all, 1791 .irq_disable = octeon_irq_ciu2_disable_all,
1792 .irq_mask = octeon_irq_ciu2_disable_local,
1793 .irq_unmask = octeon_irq_ciu2_enable,
1794#ifdef CONFIG_SMP
1795 .irq_set_affinity = octeon_irq_ciu2_set_affinity,
1796 .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
1797#endif
1798};
1799
1800static struct irq_chip octeon_irq_chip_ciu2_edge = {
1801 .name = "CIU2-E",
1802 .irq_enable = octeon_irq_ciu2_enable,
1803 .irq_disable = octeon_irq_ciu2_disable_all,
1476 .irq_ack = octeon_irq_ciu2_ack, 1804 .irq_ack = octeon_irq_ciu2_ack,
1477 .irq_mask = octeon_irq_ciu2_disable_local, 1805 .irq_mask = octeon_irq_ciu2_disable_local,
1478 .irq_unmask = octeon_irq_ciu2_enable, 1806 .irq_unmask = octeon_irq_ciu2_enable,
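
Splitting the old single chip in two means only edge-triggered lines carry .irq_ack (which clears the latched RAW bit), while level lines get bare mask/unmask; octeon_irq_ciu2_map() below picks the chip together with the matching flow handler. A sketch of that selection, with a hypothetical edge predicate:

    /* Sketch: choose chip + flow handler per line type at map time. */
    static int my_ciu2_map(struct irq_domain *d, unsigned int virq,
                           irq_hw_number_t hw)
    {
            if (my_line_is_edge(hw))        /* hypothetical predicate */
                    irq_set_chip_and_handler(virq, &my_chip_edge,
                                             handle_edge_irq);
            else
                    irq_set_chip_and_handler(virq, &my_chip_level,
                                             handle_level_irq);
            return 0;
    }
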
@@ -1582,7 +1910,7 @@ static int octeon_irq_ciu2_map(struct irq_domain *d,
1582 1910
1583 if (octeon_irq_ciu2_is_edge(line, bit)) 1911 if (octeon_irq_ciu2_is_edge(line, bit))
1584 octeon_irq_set_ciu_mapping(virq, line, bit, 0, 1912 octeon_irq_set_ciu_mapping(virq, line, bit, 0,
1585 &octeon_irq_chip_ciu2, 1913 &octeon_irq_chip_ciu2_edge,
1586 handle_edge_irq); 1914 handle_edge_irq);
1587 else 1915 else
1588 octeon_irq_set_ciu_mapping(virq, line, bit, 0, 1916 octeon_irq_set_ciu_mapping(virq, line, bit, 0,
@@ -1591,22 +1919,13 @@ static int octeon_irq_ciu2_map(struct irq_domain *d,
1591 1919
1592 return 0; 1920 return 0;
1593} 1921}
1594static int octeon_irq_ciu2_gpio_map(struct irq_domain *d,
1595 unsigned int virq, irq_hw_number_t hw)
1596{
1597 return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio);
1598}
1599 1922
1600static struct irq_domain_ops octeon_irq_domain_ciu2_ops = { 1923static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
1601 .map = octeon_irq_ciu2_map, 1924 .map = octeon_irq_ciu2_map,
1925 .unmap = octeon_irq_free_cd,
1602 .xlate = octeon_irq_ciu2_xlat, 1926 .xlate = octeon_irq_ciu2_xlat,
1603}; 1927};
1604 1928
1605static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = {
1606 .map = octeon_irq_ciu2_gpio_map,
1607 .xlate = octeon_irq_gpio_xlat,
1608};
1609
1610static void octeon_irq_ciu2(void) 1929static void octeon_irq_ciu2(void)
1611{ 1930{
1612 int line; 1931 int line;
@@ -1674,16 +1993,16 @@ out:
1674 return; 1993 return;
1675} 1994}
1676 1995
1677static void __init octeon_irq_init_ciu2(void) 1996static int __init octeon_irq_init_ciu2(
1997 struct device_node *ciu_node, struct device_node *parent)
1678{ 1998{
1679 unsigned int i; 1999 unsigned int i, r;
1680 struct device_node *gpio_node;
1681 struct device_node *ciu_node;
1682 struct irq_domain *ciu_domain = NULL; 2000 struct irq_domain *ciu_domain = NULL;
1683 2001
1684 octeon_irq_init_ciu2_percpu(); 2002 octeon_irq_init_ciu2_percpu();
1685 octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2; 2003 octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;
1686 2004
2005 octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
1687 octeon_irq_ip2 = octeon_irq_ciu2; 2006 octeon_irq_ip2 = octeon_irq_ciu2;
1688 octeon_irq_ip3 = octeon_irq_ciu2_mbox; 2007 octeon_irq_ip3 = octeon_irq_ciu2_mbox;
1689 octeon_irq_ip4 = octeon_irq_ip4_mask; 2008 octeon_irq_ip4 = octeon_irq_ip4_mask;
@@ -1691,47 +2010,49 @@ static void __init octeon_irq_init_ciu2(void)
1691 /* Mips internal */ 2010 /* Mips internal */
1692 octeon_irq_init_core(); 2011 octeon_irq_init_core();
1693 2012
1694 gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); 2013 ciu_domain = irq_domain_add_tree(
1695 if (gpio_node) { 2014 ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
1696 struct octeon_irq_gpio_domain_data *gpiod; 2015 irq_set_default_host(ciu_domain);
1697
1698 gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
1699 if (gpiod) {
1700 /* gpio domain host_data is the base hwirq number. */
1701 gpiod->base_hwirq = 7 << 6;
1702 irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_ciu2_gpio_ops, gpiod);
1703 of_node_put(gpio_node);
1704 } else
1705 pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
1706 } else
1707 pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");
1708
1709 ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-6880-ciu2");
1710 if (ciu_node) {
1711 ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
1712 irq_set_default_host(ciu_domain);
1713 of_node_put(ciu_node);
1714 } else
1715 panic("Cannot find device node for cavium,octeon-6880-ciu2.");
1716 2016
1717	/* CIU2 */	2017	/* CIU2 */
1718 for (i = 0; i < 64; i++) 2018 for (i = 0; i < 64; i++) {
1719 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i); 2019 r = octeon_irq_force_ciu_mapping(
2020 ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
2021 if (r)
2022 goto err;
2023 }
1720 2024
1721 for (i = 0; i < 32; i++) 2025 for (i = 0; i < 32; i++) {
1722 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0, 2026 r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
1723 &octeon_irq_chip_ciu2_wd, handle_level_irq); 2027 &octeon_irq_chip_ciu2_wd, handle_level_irq);
2028 if (r)
2029 goto err;
2030 }
1724 2031
1725 for (i = 0; i < 4; i++) 2032 for (i = 0; i < 4; i++) {
1726 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8); 2033 r = octeon_irq_force_ciu_mapping(
2034 ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
2035 if (r)
2036 goto err;
2037 }
1727 2038
1728 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44); 2039 r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);
2040 if (r)
2041 goto err;
1729 2042
1730 for (i = 0; i < 4; i++) 2043 for (i = 0; i < 4; i++) {
1731 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); 2044 r = octeon_irq_force_ciu_mapping(
2045 ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
2046 if (r)
2047 goto err;
2048 }
1732 2049
1733 for (i = 0; i < 4; i++) 2050 for (i = 0; i < 4; i++) {
1734 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8); 2051 r = octeon_irq_force_ciu_mapping(
2052 ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
2053 if (r)
2054 goto err;
2055 }
1735 2056
1736 irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); 2057 irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
1737 irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); 2058 irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
@@ -1741,8 +2062,242 @@ static void __init octeon_irq_init_ciu2(void)
1741 /* Enable the CIU lines */ 2062 /* Enable the CIU lines */
1742 set_c0_status(STATUSF_IP3 | STATUSF_IP2); 2063 set_c0_status(STATUSF_IP3 | STATUSF_IP2);
1743 clear_c0_status(STATUSF_IP4); 2064 clear_c0_status(STATUSF_IP4);
2065 return 0;
2066err:
2067 return r;
2068}
2069
2070struct octeon_irq_cib_host_data {
2071 raw_spinlock_t lock;
2072 u64 raw_reg;
2073 u64 en_reg;
2074 int max_bits;
2075};
2076
2077struct octeon_irq_cib_chip_data {
2078 struct octeon_irq_cib_host_data *host_data;
2079 int bit;
2080};
2081
2082static void octeon_irq_cib_enable(struct irq_data *data)
2083{
2084 unsigned long flags;
2085 u64 en;
2086 struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
2087 struct octeon_irq_cib_host_data *host_data = cd->host_data;
2088
2089 raw_spin_lock_irqsave(&host_data->lock, flags);
2090 en = cvmx_read_csr(host_data->en_reg);
2091 en |= 1ull << cd->bit;
2092 cvmx_write_csr(host_data->en_reg, en);
2093 raw_spin_unlock_irqrestore(&host_data->lock, flags);
2094}
2095
2096static void octeon_irq_cib_disable(struct irq_data *data)
2097{
2098 unsigned long flags;
2099 u64 en;
2100 struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
2101 struct octeon_irq_cib_host_data *host_data = cd->host_data;
2102
2103 raw_spin_lock_irqsave(&host_data->lock, flags);
2104 en = cvmx_read_csr(host_data->en_reg);
2105 en &= ~(1ull << cd->bit);
2106 cvmx_write_csr(host_data->en_reg, en);
2107 raw_spin_unlock_irqrestore(&host_data->lock, flags);
2108}
2109
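
Unlike CIU2's W1S/W1C pairs, the CIB block has a single read-modify-write enable register, so every update takes the per-controller raw spinlock with interrupts saved; a raw_spinlock_t is used because these paths run in hard-IRQ context. A condensed sketch of the locking pattern shared by the two helpers above:

    /* Sketch: set or clear one bit of a shared RMW enable register. */
    static void cib_update_bit(struct octeon_irq_cib_host_data *hd,
                               int bit, bool set)
    {
            unsigned long flags;
            u64 en;

            raw_spin_lock_irqsave(&hd->lock, flags);
            en = cvmx_read_csr(hd->en_reg);
            if (set)
                    en |= 1ull << bit;
            else
                    en &= ~(1ull << bit);
            cvmx_write_csr(hd->en_reg, en);
            raw_spin_unlock_irqrestore(&hd->lock, flags);
    }
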
2110static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
2111{
2112 irqd_set_trigger_type(data, t);
2113 return IRQ_SET_MASK_OK;
2114}
2115
2116static struct irq_chip octeon_irq_chip_cib = {
2117 .name = "CIB",
2118 .irq_enable = octeon_irq_cib_enable,
2119 .irq_disable = octeon_irq_cib_disable,
2120 .irq_mask = octeon_irq_cib_disable,
2121 .irq_unmask = octeon_irq_cib_enable,
2122 .irq_set_type = octeon_irq_cib_set_type,
2123};
2124
2125static int octeon_irq_cib_xlat(struct irq_domain *d,
2126 struct device_node *node,
2127 const u32 *intspec,
2128 unsigned int intsize,
2129 unsigned long *out_hwirq,
2130 unsigned int *out_type)
2131{
2132 unsigned int type = 0;
2133
2134 if (intsize == 2)
2135 type = intspec[1];
2136
2137 switch (type) {
2138 case 0: /* unofficial value, but we might as well let it work. */
2139 case 4: /* official value for level triggering. */
2140 *out_type = IRQ_TYPE_LEVEL_HIGH;
2141 break;
2142 case 1: /* official value for edge triggering. */
2143 *out_type = IRQ_TYPE_EDGE_RISING;
2144 break;
2145 default: /* Nothing else is acceptable. */
2146 return -EINVAL;
2147 }
2148
2149 *out_hwirq = intspec[0];
2150
2151 return 0;
2152}
2153
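
The xlat hook accepts a one- or two-cell specifier: cell 0 is the bit number, and the optional cell 1 is the trigger type (4 = level as the binding specifies, 1 = rising edge, 0 tolerated as level), so a consumer node would say something like interrupts = <18 4>; with a hypothetical bit number. A small standalone check of that mapping:

    /* Standalone sketch of the trigger-cell decoding used above. */
    #include <stdio.h>

    static int decode_trigger(unsigned int type)
    {
            switch (type) {
            case 0:                 /* unofficial, treated as level */
            case 4:                 /* level, active high */
                    return 'L';
            case 1:                 /* rising edge */
                    return 'E';
            default:
                    return -1;      /* rejected with -EINVAL above */
            }
    }

    int main(void)
    {
            printf("%c %c %c %d\n", decode_trigger(0), decode_trigger(4),
                   decode_trigger(1), decode_trigger(2));
            return 0;               /* prints: L L E -1 */
    }
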
2154static int octeon_irq_cib_map(struct irq_domain *d,
2155 unsigned int virq, irq_hw_number_t hw)
2156{
2157 struct octeon_irq_cib_host_data *host_data = d->host_data;
2158 struct octeon_irq_cib_chip_data *cd;
2159
2160 if (hw >= host_data->max_bits) {
2161	pr_err("ERROR: %s mapping %u is too big!\n",
2162 d->of_node->name, (unsigned)hw);
2163 return -EINVAL;
2164 }
2165
2166 cd = kzalloc(sizeof(*cd), GFP_KERNEL);
2167 cd->host_data = host_data;
2168 cd->bit = hw;
2169
2170 irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
2171 handle_simple_irq);
2172 irq_set_chip_data(virq, cd);
2173 return 0;
1744} 2174}
1745 2175
2176static struct irq_domain_ops octeon_irq_domain_cib_ops = {
2177 .map = octeon_irq_cib_map,
2178 .unmap = octeon_irq_free_cd,
2179 .xlate = octeon_irq_cib_xlat,
2180};
2181
2182/* Chain to real handler. */
2183static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
2184{
2185 u64 en;
2186 u64 raw;
2187 u64 bits;
2188 int i;
2189 int irq;
2190 struct irq_domain *cib_domain = data;
2191 struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;
2192
2193 en = cvmx_read_csr(host_data->en_reg);
2194 raw = cvmx_read_csr(host_data->raw_reg);
2195
2196 bits = en & raw;
2197
2198 for (i = 0; i < host_data->max_bits; i++) {
2199 if ((bits & 1ull << i) == 0)
2200 continue;
2201 irq = irq_find_mapping(cib_domain, i);
2202 if (!irq) {
2203 unsigned long flags;
2204
2205 pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
2206 i, host_data->raw_reg);
2207 raw_spin_lock_irqsave(&host_data->lock, flags);
2208 en = cvmx_read_csr(host_data->en_reg);
2209 en &= ~(1ull << i);
2210 cvmx_write_csr(host_data->en_reg, en);
2211 cvmx_write_csr(host_data->raw_reg, 1ull << i);
2212 raw_spin_unlock_irqrestore(&host_data->lock, flags);
2213 } else {
2214 struct irq_desc *desc = irq_to_desc(irq);
2215 struct irq_data *irq_data = irq_desc_get_irq_data(desc);
2216 /* If edge, acknowledge the bit we will be sending. */
2217 if (irqd_get_trigger_type(irq_data) &
2218 IRQ_TYPE_EDGE_BOTH)
2219 cvmx_write_csr(host_data->raw_reg, 1ull << i);
2220 generic_handle_irq_desc(irq, desc);
2221 }
2222 }
2223
2224 return IRQ_HANDLED;
2225}
2226
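
The chained handler reads both registers once, dispatches only bits that are both enabled and raised, pre-acks edge-typed bits in RAW before calling down so a new edge arriving during the handler is not lost, and masks plus acks unmapped bits to stop an interrupt storm. The scan itself is the classic enabled-and-pending loop; a sketch using a count-trailing-zeros step instead of walking every index as the code above does:

    /* Sketch: dispatch each set bit of (enabled & pending). */
    static void dispatch_pending(u64 en, u64 raw, void (*handle)(int bit))
    {
            u64 bits = en & raw;

            while (bits) {
                    int i = __builtin_ctzll(bits);  /* lowest set bit */

                    handle(i);
                    bits &= bits - 1;               /* clear that bit */
            }
    }
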
2227static int __init octeon_irq_init_cib(struct device_node *ciu_node,
2228 struct device_node *parent)
2229{
2230 const __be32 *addr;
2231 u32 val;
2232 struct octeon_irq_cib_host_data *host_data;
2233 int parent_irq;
2234 int r;
2235 struct irq_domain *cib_domain;
2236
2237 parent_irq = irq_of_parse_and_map(ciu_node, 0);
2238 if (!parent_irq) {
2239	pr_err("ERROR: Couldn't acquire parent_irq for %s.\n",
2240 ciu_node->name);
2241 return -EINVAL;
2242 }
2243
2244 host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
2245 raw_spin_lock_init(&host_data->lock);
2246
2247 addr = of_get_address(ciu_node, 0, NULL, NULL);
2248 if (!addr) {
2249	pr_err("ERROR: Couldn't acquire reg(0) %s.\n", ciu_node->name);
2250 return -EINVAL;
2251 }
2252 host_data->raw_reg = (u64)phys_to_virt(
2253 of_translate_address(ciu_node, addr));
2254
2255 addr = of_get_address(ciu_node, 1, NULL, NULL);
2256 if (!addr) {
2257	pr_err("ERROR: Couldn't acquire reg(1) %s.\n", ciu_node->name);
2258 return -EINVAL;
2259 }
2260 host_data->en_reg = (u64)phys_to_virt(
2261 of_translate_address(ciu_node, addr));
2262
2263 r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
2264 if (r) {
2265	pr_err("ERROR: Couldn't read cavium,max-bits from %s.\n",
2266 ciu_node->name);
2267 return r;
2268 }
2269 host_data->max_bits = val;
2270
2271 cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
2272 &octeon_irq_domain_cib_ops,
2273 host_data);
2274 if (!cib_domain) {
2275	pr_err("ERROR: Couldn't irq_domain_add_linear().\n");
2276 return -ENOMEM;
2277 }
2278
2279 cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
2280 cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */
2281
2282 r = request_irq(parent_irq, octeon_irq_cib_handler,
2283 IRQF_NO_THREAD, "cib", cib_domain);
2284 if (r) {
2285 pr_err("request_irq cib failed %d\n", r);
2286 return r;
2287 }
2288 pr_info("CIB interrupt controller probed: %llx %d\n",
2289 host_data->raw_reg, host_data->max_bits);
2290 return 0;
2291}
2292
2293static struct of_device_id ciu_types[] __initdata = {
2294 {.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
2295 {.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
2296 {.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
2297 {.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
2298 {}
2299};
2300
1746void __init arch_init_irq(void) 2301void __init arch_init_irq(void)
1747{ 2302{
1748#ifdef CONFIG_SMP 2303#ifdef CONFIG_SMP
@@ -1750,10 +2305,7 @@ void __init arch_init_irq(void)
1750 cpumask_clear(irq_default_affinity); 2305 cpumask_clear(irq_default_affinity);
1751 cpumask_set_cpu(smp_processor_id(), irq_default_affinity); 2306 cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
1752#endif 2307#endif
1753 if (OCTEON_IS_MODEL(OCTEON_CN68XX)) 2308 of_irq_init(ciu_types);
1754 octeon_irq_init_ciu2();
1755 else
1756 octeon_irq_init_ciu();
1757} 2309}
1758 2310
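
With every controller init now taking (ciu_node, parent) and returning int, the OCTEON_IS_MODEL() runtime checks collapse into an of_device_id table: of_irq_init() walks the devicetree, matches each node's compatible string against ciu_types, and invokes the .data callback in parent-first order. A sketch of the callback shape it expects (names hypothetical):

    /* Sketch: the init-callback signature of_irq_init() expects in .data. */
    static int __init my_intc_init(struct device_node *node,
                                   struct device_node *parent)
    {
            /* map registers, add an irq_domain for 'node', ... */
            return 0;               /* non-zero reports failure */
    }

    static struct of_device_id my_irq_matches[] __initdata = {
            { .compatible = "vendor,my-intc", .data = my_intc_init },
            {}
    };

    /* called from the platform's arch_init_irq():
     *         of_irq_init(my_irq_matches);
     */
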
1759asmlinkage void plat_irq_dispatch(void) 2311asmlinkage void plat_irq_dispatch(void)
@@ -1767,13 +2319,13 @@ asmlinkage void plat_irq_dispatch(void)
1767 cop0_cause &= cop0_status; 2319 cop0_cause &= cop0_status;
1768 cop0_cause &= ST0_IM; 2320 cop0_cause &= ST0_IM;
1769 2321
1770 if (unlikely(cop0_cause & STATUSF_IP2)) 2322 if (cop0_cause & STATUSF_IP2)
1771 octeon_irq_ip2(); 2323 octeon_irq_ip2();
1772 else if (unlikely(cop0_cause & STATUSF_IP3)) 2324 else if (cop0_cause & STATUSF_IP3)
1773 octeon_irq_ip3(); 2325 octeon_irq_ip3();
1774 else if (unlikely(cop0_cause & STATUSF_IP4)) 2326 else if (cop0_cause & STATUSF_IP4)
1775 octeon_irq_ip4(); 2327 octeon_irq_ip4();
1776 else if (likely(cop0_cause)) 2328 else if (cop0_cause)
1777 do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); 2329 do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
1778 else 2330 else
1779 break; 2331 break;
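
plat_irq_dispatch() services the highest pending cause line first: the IP bits sit at CAUSE[15:8] and fls() returns a 1-based bit index, so fls(cop0_cause) - 9 recovers the IP number. The unlikely() hints are dropped because IP2/IP3 are the hot path on OCTEON, not the cold one. A worked sketch of the arithmetic:

    /* Worked example of the fls() mapping above:
     *   IP2 pending -> cop0_cause = 0x0400, fls() = 11, 11 - 9 = 2,
     *                  so do_IRQ(MIPS_CPU_IRQ_BASE + 2);
     *   IP7 pending -> cop0_cause = 0x8000, fls() = 16, 16 - 9 = 7.
     * With several bits set, fls() picks the highest, so IP7 outranks IP2. */
    static inline int pending_to_irq(unsigned long cop0_cause)
    {
            return fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE;
    }
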
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 94f888d3384e..a42110e7edbc 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -41,6 +41,7 @@
41#include <asm/octeon/octeon.h> 41#include <asm/octeon/octeon.h>
42#include <asm/octeon/pci-octeon.h> 42#include <asm/octeon/pci-octeon.h>
43#include <asm/octeon/cvmx-mio-defs.h> 43#include <asm/octeon/cvmx-mio-defs.h>
44#include <asm/octeon/cvmx-rst-defs.h>
44 45
45extern struct plat_smp_ops octeon_smp_ops; 46extern struct plat_smp_ops octeon_smp_ops;
46 47
@@ -579,12 +580,10 @@ void octeon_user_io_init(void)
579 /* R/W If set, CVMSEG is available for loads/stores in user 580 /* R/W If set, CVMSEG is available for loads/stores in user
580 * mode. */ 581 * mode. */
581 cvmmemctl.s.cvmsegenau = 0; 582 cvmmemctl.s.cvmsegenau = 0;
582 /* R/W Size of local memory in cache blocks, 54 (6912 bytes)
583 * is max legal value. */
584 cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE;
585 583
586 write_c0_cvmmemctl(cvmmemctl.u64); 584 write_c0_cvmmemctl(cvmmemctl.u64);
587 585
586 /* Setup of CVMSEG is done in kernel-entry-init.h */
588 if (smp_processor_id() == 0) 587 if (smp_processor_id() == 0)
589 pr_notice("CVMSEG size: %d cache lines (%d bytes)\n", 588 pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
590 CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, 589 CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
@@ -615,6 +614,7 @@ void __init prom_init(void)
615 const char *arg; 614 const char *arg;
616 char *p; 615 char *p;
617 int i; 616 int i;
617 u64 t;
618 int argc; 618 int argc;
619#ifdef CONFIG_CAVIUM_RESERVE32 619#ifdef CONFIG_CAVIUM_RESERVE32
620 int64_t addr = -1; 620 int64_t addr = -1;
@@ -654,15 +654,56 @@ void __init prom_init(void)
654 sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz; 654 sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
655 sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags; 655 sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
656 656
657 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { 657 if (OCTEON_IS_OCTEON2()) {
658 /* I/O clock runs at a different rate than the CPU. */ 658 /* I/O clock runs at a different rate than the CPU. */
659 union cvmx_mio_rst_boot rst_boot; 659 union cvmx_mio_rst_boot rst_boot;
660 rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT); 660 rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
661 octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul; 661 octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
662 } else if (OCTEON_IS_OCTEON3()) {
663 /* I/O clock runs at a different rate than the CPU. */
664 union cvmx_rst_boot rst_boot;
665 rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
666 octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
662 } else { 667 } else {
663 octeon_io_clock_rate = sysinfo->cpu_clock_hz; 668 octeon_io_clock_rate = sysinfo->cpu_clock_hz;
664 } 669 }
665 670
671 t = read_c0_cvmctl();
672 if ((t & (1ull << 27)) == 0) {
673 /*
674 * Setup the multiplier save/restore code if
675 * CvmCtl[NOMUL] clear.
676 */
677 void *save;
678 void *save_end;
679 void *restore;
680 void *restore_end;
681 int save_len;
682 int restore_len;
683 int save_max = (char *)octeon_mult_save_end -
684 (char *)octeon_mult_save;
685 int restore_max = (char *)octeon_mult_restore_end -
686 (char *)octeon_mult_restore;
687 if (current_cpu_data.cputype == CPU_CAVIUM_OCTEON3) {
688 save = octeon_mult_save3;
689 save_end = octeon_mult_save3_end;
690 restore = octeon_mult_restore3;
691 restore_end = octeon_mult_restore3_end;
692 } else {
693 save = octeon_mult_save2;
694 save_end = octeon_mult_save2_end;
695 restore = octeon_mult_restore2;
696 restore_end = octeon_mult_restore2_end;
697 }
698 save_len = (char *)save_end - (char *)save;
699 restore_len = (char *)restore_end - (char *)restore;
700 if (!WARN_ON(save_len > save_max ||
701 restore_len > restore_max)) {
702 memcpy(octeon_mult_save, save, save_len);
703 memcpy(octeon_mult_restore, restore, restore_len);
704 }
705 }
706
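
When CvmCtl[NOMUL] is clear the CPU carries the extra multiplier state, so boot-time patching copies the model-specific save/restore sequence (OCTEON II vs. III) over fixed-size placeholder stubs, with WARN_ON() refusing any copy that would overrun them. A hedged sketch of that bounded patch step (the marker symbols are hypothetical stand-ins for the octeon_mult_* labels):

    /* Sketch: copy a selected stub over a placeholder, bounds-checked. */
    static void patch_stub(void *slot_begin, void *slot_end,
                           void *stub_begin, void *stub_end)
    {
            int slot_len = (char *)slot_end - (char *)slot_begin;
            int stub_len = (char *)stub_end - (char *)stub_begin;

            if (!WARN_ON(stub_len > slot_len))
                    memcpy(slot_begin, stub_begin, stub_len);
    }
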
666 /* 707 /*
667 * Only enable the LED controller if we're running on a CN38XX, CN58XX, 708 * Only enable the LED controller if we're running on a CN38XX, CN58XX,
668 * or CN56XX. The CN30XX and CN31XX don't have an LED controller. 709 * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
@@ -1004,7 +1045,7 @@ EXPORT_SYMBOL(prom_putchar);
1004 1045
1005void prom_free_prom_memory(void) 1046void prom_free_prom_memory(void)
1006{ 1047{
1007 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) { 1048 if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR) {
1008 /* Check for presence of Core-14449 fix. */ 1049 /* Check for presence of Core-14449 fix. */
1009 u32 insn; 1050 u32 insn;
1010 u32 *foo; 1051 u32 *foo;
@@ -1026,8 +1067,9 @@ void prom_free_prom_memory(void)
1026 panic("No PREF instruction at Core-14449 probe point."); 1067 panic("No PREF instruction at Core-14449 probe point.");
1027 1068
1028 if (((insn >> 16) & 0x1f) != 28) 1069 if (((insn >> 16) & 0x1f) != 28)
1029 panic("Core-14449 WAR not in place (%04x).\n" 1070 panic("OCTEON II DCache prefetch workaround not in place (%04x).\n"
1030 "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", insn); 1071 "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).",
1072 insn);
1031 } 1073 }
1032} 1074}
1033 1075
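
The Core-14449 probe peeks at the compiled kernel to verify the workaround was actually built in: it reads the instruction word at the probe point and checks bits [20:16], the rt/hint field of a MIPS I-type instruction, against the expected prefetch hint 28. A standalone sketch of that field decode:

    /* Sketch: pull the I-type fields out of a 32-bit MIPS opcode. */
    #include <stdint.h>

    struct mips_itype {
            unsigned op;    /* [31:26] major opcode           */
            unsigned rs;    /* [25:21] base register          */
            unsigned rt;    /* [20:16] target reg / pref hint */
            unsigned imm;   /* [15:0]  offset                 */
    };

    static struct mips_itype decode_itype(uint32_t insn)
    {
            return (struct mips_itype){
                    .op  = insn >> 26,
                    .rs  = (insn >> 21) & 0x1f,
                    .rt  = (insn >> 16) & 0x1f, /* the field probed above */
                    .imm = insn & 0xffff,
            };
    }
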
diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig
new file mode 100644
index 000000000000..4bce1f8ebe98
--- /dev/null
+++ b/arch/mips/configs/malta_qemu_32r6_defconfig
@@ -0,0 +1,193 @@
1CONFIG_MIPS_MALTA=y
2CONFIG_CPU_LITTLE_ENDIAN=y
3CONFIG_CPU_MIPS32_R6=y
4CONFIG_PAGE_SIZE_16KB=y
5CONFIG_HZ_100=y
6CONFIG_SYSVIPC=y
7CONFIG_POSIX_MQUEUE=y
8CONFIG_AUDIT=y
9CONFIG_NO_HZ=y
10CONFIG_IKCONFIG=y
11CONFIG_IKCONFIG_PROC=y
12CONFIG_LOG_BUF_SHIFT=15
13CONFIG_SYSCTL_SYSCALL=y
14CONFIG_EMBEDDED=y
15CONFIG_SLAB=y
16CONFIG_MODULES=y
17CONFIG_MODULE_UNLOAD=y
18CONFIG_MODVERSIONS=y
19CONFIG_MODULE_SRCVERSION_ALL=y
20# CONFIG_BLK_DEV_BSG is not set
21CONFIG_PCI=y
22# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
23CONFIG_NET=y
24CONFIG_PACKET=y
25CONFIG_UNIX=y
26CONFIG_XFRM_USER=m
27CONFIG_NET_KEY=y
28CONFIG_INET=y
29CONFIG_IP_MULTICAST=y
30CONFIG_IP_ADVANCED_ROUTER=y
31CONFIG_IP_MULTIPLE_TABLES=y
32CONFIG_IP_ROUTE_MULTIPATH=y
33CONFIG_IP_ROUTE_VERBOSE=y
34CONFIG_IP_PNP=y
35CONFIG_IP_PNP_DHCP=y
36CONFIG_IP_PNP_BOOTP=y
37CONFIG_NET_IPIP=m
38CONFIG_IP_MROUTE=y
39CONFIG_IP_PIMSM_V1=y
40CONFIG_IP_PIMSM_V2=y
41CONFIG_SYN_COOKIES=y
42CONFIG_INET_AH=m
43CONFIG_INET_ESP=m
44CONFIG_INET_IPCOMP=m
45# CONFIG_INET_LRO is not set
46CONFIG_INET6_AH=m
47CONFIG_INET6_ESP=m
48CONFIG_INET6_IPCOMP=m
49CONFIG_IPV6_TUNNEL=m
50CONFIG_BRIDGE=m
51CONFIG_VLAN_8021Q=m
52CONFIG_ATALK=m
53CONFIG_DEV_APPLETALK=m
54CONFIG_IPDDP=m
55CONFIG_IPDDP_ENCAP=y
56CONFIG_NET_SCHED=y
57CONFIG_NET_SCH_CBQ=m
58CONFIG_NET_SCH_HTB=m
59CONFIG_NET_SCH_HFSC=m
60CONFIG_NET_SCH_PRIO=m
61CONFIG_NET_SCH_RED=m
62CONFIG_NET_SCH_SFQ=m
63CONFIG_NET_SCH_TEQL=m
64CONFIG_NET_SCH_TBF=m
65CONFIG_NET_SCH_GRED=m
66CONFIG_NET_SCH_DSMARK=m
67CONFIG_NET_SCH_NETEM=m
68CONFIG_NET_SCH_INGRESS=m
69CONFIG_NET_CLS_BASIC=m
70CONFIG_NET_CLS_TCINDEX=m
71CONFIG_NET_CLS_ROUTE4=m
72CONFIG_NET_CLS_FW=m
73CONFIG_NET_CLS_U32=m
74CONFIG_NET_CLS_RSVP=m
75CONFIG_NET_CLS_RSVP6=m
76CONFIG_NET_CLS_ACT=y
77CONFIG_NET_ACT_POLICE=y
78CONFIG_NET_CLS_IND=y
79# CONFIG_WIRELESS is not set
80CONFIG_DEVTMPFS=y
81CONFIG_BLK_DEV_LOOP=y
82CONFIG_BLK_DEV_CRYPTOLOOP=m
83CONFIG_IDE=y
84# CONFIG_IDE_PROC_FS is not set
85# CONFIG_IDEPCI_PCIBUS_ORDER is not set
86CONFIG_BLK_DEV_GENERIC=y
87CONFIG_BLK_DEV_PIIX=y
88CONFIG_SCSI=y
89CONFIG_BLK_DEV_SD=y
90CONFIG_CHR_DEV_SG=y
91# CONFIG_SCSI_LOWLEVEL is not set
92CONFIG_NETDEVICES=y
93# CONFIG_NET_VENDOR_3COM is not set
94# CONFIG_NET_VENDOR_ADAPTEC is not set
95# CONFIG_NET_VENDOR_ALTEON is not set
96CONFIG_PCNET32=y
97# CONFIG_NET_VENDOR_ATHEROS is not set
98# CONFIG_NET_VENDOR_BROADCOM is not set
99# CONFIG_NET_VENDOR_BROCADE is not set
100# CONFIG_NET_VENDOR_CHELSIO is not set
101# CONFIG_NET_VENDOR_CISCO is not set
102# CONFIG_NET_VENDOR_DEC is not set
103# CONFIG_NET_VENDOR_DLINK is not set
104# CONFIG_NET_VENDOR_EMULEX is not set
105# CONFIG_NET_VENDOR_EXAR is not set
106# CONFIG_NET_VENDOR_HP is not set
107# CONFIG_NET_VENDOR_INTEL is not set
108# CONFIG_NET_VENDOR_MARVELL is not set
109# CONFIG_NET_VENDOR_MELLANOX is not set
110# CONFIG_NET_VENDOR_MICREL is not set
111# CONFIG_NET_VENDOR_MYRI is not set
112# CONFIG_NET_VENDOR_NATSEMI is not set
113# CONFIG_NET_VENDOR_NVIDIA is not set
114# CONFIG_NET_VENDOR_OKI is not set
115# CONFIG_NET_PACKET_ENGINE is not set
116# CONFIG_NET_VENDOR_QLOGIC is not set
117# CONFIG_NET_VENDOR_REALTEK is not set
118# CONFIG_NET_VENDOR_RDC is not set
119# CONFIG_NET_VENDOR_SEEQ is not set
120# CONFIG_NET_VENDOR_SILAN is not set
121# CONFIG_NET_VENDOR_SIS is not set
122# CONFIG_NET_VENDOR_SMSC is not set
123# CONFIG_NET_VENDOR_STMICRO is not set
124# CONFIG_NET_VENDOR_SUN is not set
125# CONFIG_NET_VENDOR_TEHUTI is not set
126# CONFIG_NET_VENDOR_TI is not set
127# CONFIG_NET_VENDOR_TOSHIBA is not set
128# CONFIG_NET_VENDOR_VIA is not set
129# CONFIG_NET_VENDOR_WIZNET is not set
130# CONFIG_WLAN is not set
131# CONFIG_VT is not set
132CONFIG_LEGACY_PTY_COUNT=4
133CONFIG_SERIAL_8250=y
134CONFIG_SERIAL_8250_CONSOLE=y
135CONFIG_HW_RANDOM=y
136# CONFIG_HWMON is not set
137CONFIG_FB=y
138CONFIG_FIRMWARE_EDID=y
139CONFIG_FB_MATROX=y
140CONFIG_FB_MATROX_G=y
141CONFIG_USB=y
142CONFIG_USB_EHCI_HCD=y
143# CONFIG_USB_EHCI_TT_NEWSCHED is not set
144CONFIG_USB_UHCI_HCD=y
145CONFIG_USB_STORAGE=y
146CONFIG_NEW_LEDS=y
147CONFIG_LEDS_CLASS=y
148CONFIG_LEDS_TRIGGERS=y
149CONFIG_LEDS_TRIGGER_TIMER=y
150CONFIG_LEDS_TRIGGER_IDE_DISK=y
151CONFIG_LEDS_TRIGGER_HEARTBEAT=y
152CONFIG_LEDS_TRIGGER_BACKLIGHT=y
153CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
154CONFIG_RTC_CLASS=y
155CONFIG_RTC_DRV_CMOS=y
156CONFIG_EXT2_FS=y
157CONFIG_EXT3_FS=y
158# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
159CONFIG_XFS_FS=y
160CONFIG_XFS_QUOTA=y
161CONFIG_XFS_POSIX_ACL=y
162CONFIG_QUOTA=y
163CONFIG_QFMT_V2=y
164CONFIG_MSDOS_FS=m
165CONFIG_VFAT_FS=m
166CONFIG_PROC_KCORE=y
167CONFIG_TMPFS=y
168CONFIG_NFS_FS=y
169CONFIG_ROOT_NFS=y
170CONFIG_CIFS=m
171CONFIG_CIFS_WEAK_PW_HASH=y
172CONFIG_CIFS_XATTR=y
173CONFIG_CIFS_POSIX=y
174CONFIG_NLS_CODEPAGE_437=m
175CONFIG_NLS_ISO8859_1=m
176# CONFIG_FTRACE is not set
177CONFIG_CRYPTO_NULL=m
178CONFIG_CRYPTO_PCBC=m
179CONFIG_CRYPTO_HMAC=y
180CONFIG_CRYPTO_MICHAEL_MIC=m
181CONFIG_CRYPTO_SHA512=m
182CONFIG_CRYPTO_TGR192=m
183CONFIG_CRYPTO_WP512=m
184CONFIG_CRYPTO_ANUBIS=m
185CONFIG_CRYPTO_BLOWFISH=m
186CONFIG_CRYPTO_CAST5=m
187CONFIG_CRYPTO_CAST6=m
188CONFIG_CRYPTO_KHAZAD=m
189CONFIG_CRYPTO_SERPENT=m
190CONFIG_CRYPTO_TEA=m
191CONFIG_CRYPTO_TWOFISH=m
192# CONFIG_CRYPTO_ANSI_CPRNG is not set
193# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/fw/arc/misc.c b/arch/mips/fw/arc/misc.c
index f9f5307434c2..19f710117d97 100644
--- a/arch/mips/fw/arc/misc.c
+++ b/arch/mips/fw/arc/misc.c
@@ -9,6 +9,7 @@
9 * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org) 9 * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
10 * Copyright (C) 1999 Silicon Graphics, Inc. 10 * Copyright (C) 1999 Silicon Graphics, Inc.
11 */ 11 */
12#include <linux/compiler.h>
12#include <linux/init.h> 13#include <linux/init.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/irqflags.h> 15#include <linux/irqflags.h>
@@ -19,50 +20,55 @@
19#include <asm/sgialib.h> 20#include <asm/sgialib.h>
20#include <asm/bootinfo.h> 21#include <asm/bootinfo.h>
21 22
22VOID 23VOID __noreturn
23ArcHalt(VOID) 24ArcHalt(VOID)
24{ 25{
25 bc_disable(); 26 bc_disable();
26 local_irq_disable(); 27 local_irq_disable();
27 ARC_CALL0(halt); 28 ARC_CALL0(halt);
28never: goto never; 29
30 unreachable();
29} 31}
30 32
31VOID 33VOID __noreturn
32ArcPowerDown(VOID) 34ArcPowerDown(VOID)
33{ 35{
34 bc_disable(); 36 bc_disable();
35 local_irq_disable(); 37 local_irq_disable();
36 ARC_CALL0(pdown); 38 ARC_CALL0(pdown);
37never: goto never; 39
40 unreachable();
38} 41}
39 42
40/* XXX is this a soft reset basically? XXX */ 43/* XXX is this a soft reset basically? XXX */
41VOID 44VOID __noreturn
42ArcRestart(VOID) 45ArcRestart(VOID)
43{ 46{
44 bc_disable(); 47 bc_disable();
45 local_irq_disable(); 48 local_irq_disable();
46 ARC_CALL0(restart); 49 ARC_CALL0(restart);
47never: goto never; 50
51 unreachable();
48} 52}
49 53
50VOID 54VOID __noreturn
51ArcReboot(VOID) 55ArcReboot(VOID)
52{ 56{
53 bc_disable(); 57 bc_disable();
54 local_irq_disable(); 58 local_irq_disable();
55 ARC_CALL0(reboot); 59 ARC_CALL0(reboot);
56never: goto never; 60
61 unreachable();
57} 62}
58 63
59VOID 64VOID __noreturn
60ArcEnterInteractiveMode(VOID) 65ArcEnterInteractiveMode(VOID)
61{ 66{
62 bc_disable(); 67 bc_disable();
63 local_irq_disable(); 68 local_irq_disable();
64 ARC_CALL0(imode); 69 ARC_CALL0(imode);
65never: goto never; 70
71 unreachable();
66} 72}
67 73
68LONG 74LONG
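
Replacing the never: goto never; spin with __noreturn plus unreachable() tells the compiler the firmware call does not come back: the dead epilogue is dropped, -Wreturn-type stays quiet, and the annotation propagates to callers. A minimal sketch of the same pattern outside the kernel, using the GCC builtin the kernel macro wraps (firmware_call_halt is hypothetical):

    /* Sketch: annotate a function that hands control to firmware. */
    extern void firmware_call_halt(void);   /* hypothetical; never returns */

    __attribute__((noreturn))
    static void my_halt(void)
    {
            firmware_call_halt();
            /* Tell the compiler the fall-through is impossible, so no
             * return sequence is emitted for this path. */
            __builtin_unreachable();
    }
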
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 200efeac4181..526539cbc99f 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -1,4 +1,5 @@
1# MIPS headers 1# MIPS headers
2generic-$(CONFIG_GENERIC_CSUM) += checksum.h
2generic-y += cputime.h 3generic-y += cputime.h
3generic-y += current.h 4generic-y += current.h
4generic-y += dma-contiguous.h 5generic-y += dma-contiguous.h
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 6caf8766b80f..0cae4595e985 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -19,7 +19,7 @@
19#include <asm/asmmacro-64.h> 19#include <asm/asmmacro-64.h>
20#endif 20#endif
21 21
22#ifdef CONFIG_CPU_MIPSR2 22#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
23 .macro local_irq_enable reg=t0 23 .macro local_irq_enable reg=t0
24 ei 24 ei
25 irq_enable_hazard 25 irq_enable_hazard
@@ -104,7 +104,8 @@
104 .endm 104 .endm
105 105
106 .macro fpu_save_double thread status tmp 106 .macro fpu_save_double thread status tmp
107#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 107#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
108 defined(CONFIG_CPU_MIPS32_R6)
108 sll \tmp, \status, 5 109 sll \tmp, \status, 5
109 bgez \tmp, 10f 110 bgez \tmp, 10f
110 fpu_save_16odd \thread 111 fpu_save_16odd \thread
@@ -160,7 +161,8 @@
160 .endm 161 .endm
161 162
162 .macro fpu_restore_double thread status tmp 163 .macro fpu_restore_double thread status tmp
163#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 164#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
165 defined(CONFIG_CPU_MIPS32_R6)
164 sll \tmp, \status, 5 166 sll \tmp, \status, 5
165 bgez \tmp, 10f # 16 register mode? 167 bgez \tmp, 10f # 16 register mode?
166 168
@@ -170,16 +172,16 @@
170 fpu_restore_16even \thread \tmp 172 fpu_restore_16even \thread \tmp
171 .endm 173 .endm
172 174
173#ifdef CONFIG_CPU_MIPSR2 175#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
174 .macro _EXT rd, rs, p, s 176 .macro _EXT rd, rs, p, s
175 ext \rd, \rs, \p, \s 177 ext \rd, \rs, \p, \s
176 .endm 178 .endm
177#else /* !CONFIG_CPU_MIPSR2 */ 179#else /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
178 .macro _EXT rd, rs, p, s 180 .macro _EXT rd, rs, p, s
179 srl \rd, \rs, \p 181 srl \rd, \rs, \p
180 andi \rd, \rd, (1 << \s) - 1 182 andi \rd, \rd, (1 << \s) - 1
181 .endm 183 .endm
182#endif /* !CONFIG_CPU_MIPSR2 */ 184#endif /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
183 185
184/* 186/*
185 * Temporary until all gas have MT ASE support 187 * Temporary until all gas have MT ASE support
@@ -304,7 +306,7 @@
304 .set push 306 .set push
305 .set noat 307 .set noat
306 SET_HARDFLOAT 308 SET_HARDFLOAT
307 add $1, \base, \off 309 addu $1, \base, \off
308 .word LDD_MSA_INSN | (\wd << 6) 310 .word LDD_MSA_INSN | (\wd << 6)
309 .set pop 311 .set pop
310 .endm 312 .endm
@@ -313,7 +315,7 @@
313 .set push 315 .set push
314 .set noat 316 .set noat
315 SET_HARDFLOAT 317 SET_HARDFLOAT
316 add $1, \base, \off 318 addu $1, \base, \off
317 .word STD_MSA_INSN | (\wd << 6) 319 .word STD_MSA_INSN | (\wd << 6)
318 .set pop 320 .set pop
319 .endm 321 .endm
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 857da84cfc92..26d436336f2e 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -54,19 +54,19 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
54 " sc %0, %1 \n" \ 54 " sc %0, %1 \n" \
55 " beqzl %0, 1b \n" \ 55 " beqzl %0, 1b \n" \
56 " .set mips0 \n" \ 56 " .set mips0 \n" \
57 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ 57 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
58 : "Ir" (i)); \ 58 : "Ir" (i)); \
59 } else if (kernel_uses_llsc) { \ 59 } else if (kernel_uses_llsc) { \
60 int temp; \ 60 int temp; \
61 \ 61 \
62 do { \ 62 do { \
63 __asm__ __volatile__( \ 63 __asm__ __volatile__( \
64 " .set arch=r4000 \n" \ 64 " .set "MIPS_ISA_LEVEL" \n" \
65 " ll %0, %1 # atomic_" #op "\n" \ 65 " ll %0, %1 # atomic_" #op "\n" \
66 " " #asm_op " %0, %2 \n" \ 66 " " #asm_op " %0, %2 \n" \
67 " sc %0, %1 \n" \ 67 " sc %0, %1 \n" \
68 " .set mips0 \n" \ 68 " .set mips0 \n" \
69 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ 69 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
70 : "Ir" (i)); \ 70 : "Ir" (i)); \
71 } while (unlikely(!temp)); \ 71 } while (unlikely(!temp)); \
72 } else { \ 72 } else { \
@@ -97,20 +97,20 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
97 " " #asm_op " %0, %1, %3 \n" \ 97 " " #asm_op " %0, %1, %3 \n" \
98 " .set mips0 \n" \ 98 " .set mips0 \n" \
99 : "=&r" (result), "=&r" (temp), \ 99 : "=&r" (result), "=&r" (temp), \
100 "+" GCC_OFF12_ASM() (v->counter) \ 100 "+" GCC_OFF_SMALL_ASM() (v->counter) \
101 : "Ir" (i)); \ 101 : "Ir" (i)); \
102 } else if (kernel_uses_llsc) { \ 102 } else if (kernel_uses_llsc) { \
103 int temp; \ 103 int temp; \
104 \ 104 \
105 do { \ 105 do { \
106 __asm__ __volatile__( \ 106 __asm__ __volatile__( \
107 " .set arch=r4000 \n" \ 107 " .set "MIPS_ISA_LEVEL" \n" \
108 " ll %1, %2 # atomic_" #op "_return \n" \ 108 " ll %1, %2 # atomic_" #op "_return \n" \
109 " " #asm_op " %0, %1, %3 \n" \ 109 " " #asm_op " %0, %1, %3 \n" \
110 " sc %0, %2 \n" \ 110 " sc %0, %2 \n" \
111 " .set mips0 \n" \ 111 " .set mips0 \n" \
112 : "=&r" (result), "=&r" (temp), \ 112 : "=&r" (result), "=&r" (temp), \
113 "+" GCC_OFF12_ASM() (v->counter) \ 113 "+" GCC_OFF_SMALL_ASM() (v->counter) \
114 : "Ir" (i)); \ 114 : "Ir" (i)); \
115 } while (unlikely(!result)); \ 115 } while (unlikely(!result)); \
116 \ 116 \
@@ -171,14 +171,14 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
171 "1: \n" 171 "1: \n"
172 " .set mips0 \n" 172 " .set mips0 \n"
173 : "=&r" (result), "=&r" (temp), 173 : "=&r" (result), "=&r" (temp),
174 "+" GCC_OFF12_ASM() (v->counter) 174 "+" GCC_OFF_SMALL_ASM() (v->counter)
175 : "Ir" (i), GCC_OFF12_ASM() (v->counter) 175 : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
176 : "memory"); 176 : "memory");
177 } else if (kernel_uses_llsc) { 177 } else if (kernel_uses_llsc) {
178 int temp; 178 int temp;
179 179
180 __asm__ __volatile__( 180 __asm__ __volatile__(
181 " .set arch=r4000 \n" 181 " .set "MIPS_ISA_LEVEL" \n"
182 "1: ll %1, %2 # atomic_sub_if_positive\n" 182 "1: ll %1, %2 # atomic_sub_if_positive\n"
183 " subu %0, %1, %3 \n" 183 " subu %0, %1, %3 \n"
184 " bltz %0, 1f \n" 184 " bltz %0, 1f \n"
@@ -190,7 +190,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
190 "1: \n" 190 "1: \n"
191 " .set mips0 \n" 191 " .set mips0 \n"
192 : "=&r" (result), "=&r" (temp), 192 : "=&r" (result), "=&r" (temp),
193 "+" GCC_OFF12_ASM() (v->counter) 193 "+" GCC_OFF_SMALL_ASM() (v->counter)
194 : "Ir" (i)); 194 : "Ir" (i));
195 } else { 195 } else {
196 unsigned long flags; 196 unsigned long flags;
@@ -333,19 +333,19 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
333 " scd %0, %1 \n" \ 333 " scd %0, %1 \n" \
334 " beqzl %0, 1b \n" \ 334 " beqzl %0, 1b \n" \
335 " .set mips0 \n" \ 335 " .set mips0 \n" \
336 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ 336 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
337 : "Ir" (i)); \ 337 : "Ir" (i)); \
338 } else if (kernel_uses_llsc) { \ 338 } else if (kernel_uses_llsc) { \
339 long temp; \ 339 long temp; \
340 \ 340 \
341 do { \ 341 do { \
342 __asm__ __volatile__( \ 342 __asm__ __volatile__( \
343 " .set arch=r4000 \n" \ 343 " .set "MIPS_ISA_LEVEL" \n" \
344 " lld %0, %1 # atomic64_" #op "\n" \ 344 " lld %0, %1 # atomic64_" #op "\n" \
345 " " #asm_op " %0, %2 \n" \ 345 " " #asm_op " %0, %2 \n" \
346 " scd %0, %1 \n" \ 346 " scd %0, %1 \n" \
347 " .set mips0 \n" \ 347 " .set mips0 \n" \
348 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ 348 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
349 : "Ir" (i)); \ 349 : "Ir" (i)); \
350 } while (unlikely(!temp)); \ 350 } while (unlikely(!temp)); \
351 } else { \ 351 } else { \
@@ -376,21 +376,21 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
376 " " #asm_op " %0, %1, %3 \n" \ 376 " " #asm_op " %0, %1, %3 \n" \
377 " .set mips0 \n" \ 377 " .set mips0 \n" \
378 : "=&r" (result), "=&r" (temp), \ 378 : "=&r" (result), "=&r" (temp), \
379 "+" GCC_OFF12_ASM() (v->counter) \ 379 "+" GCC_OFF_SMALL_ASM() (v->counter) \
380 : "Ir" (i)); \ 380 : "Ir" (i)); \
381 } else if (kernel_uses_llsc) { \ 381 } else if (kernel_uses_llsc) { \
382 long temp; \ 382 long temp; \
383 \ 383 \
384 do { \ 384 do { \
385 __asm__ __volatile__( \ 385 __asm__ __volatile__( \
386 " .set arch=r4000 \n" \ 386 " .set "MIPS_ISA_LEVEL" \n" \
387 " lld %1, %2 # atomic64_" #op "_return\n" \ 387 " lld %1, %2 # atomic64_" #op "_return\n" \
388 " " #asm_op " %0, %1, %3 \n" \ 388 " " #asm_op " %0, %1, %3 \n" \
389 " scd %0, %2 \n" \ 389 " scd %0, %2 \n" \
390 " .set mips0 \n" \ 390 " .set mips0 \n" \
391 : "=&r" (result), "=&r" (temp), \ 391 : "=&r" (result), "=&r" (temp), \
392 "=" GCC_OFF12_ASM() (v->counter) \ 392 "=" GCC_OFF_SMALL_ASM() (v->counter) \
393 : "Ir" (i), GCC_OFF12_ASM() (v->counter) \ 393 : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
394 : "memory"); \ 394 : "memory"); \
395 } while (unlikely(!result)); \ 395 } while (unlikely(!result)); \
396 \ 396 \
@@ -452,14 +452,14 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
452 "1: \n" 452 "1: \n"
453 " .set mips0 \n" 453 " .set mips0 \n"
454 : "=&r" (result), "=&r" (temp), 454 : "=&r" (result), "=&r" (temp),
455 "=" GCC_OFF12_ASM() (v->counter) 455 "=" GCC_OFF_SMALL_ASM() (v->counter)
456 : "Ir" (i), GCC_OFF12_ASM() (v->counter) 456 : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
457 : "memory"); 457 : "memory");
458 } else if (kernel_uses_llsc) { 458 } else if (kernel_uses_llsc) {
459 long temp; 459 long temp;
460 460
461 __asm__ __volatile__( 461 __asm__ __volatile__(
462 " .set arch=r4000 \n" 462 " .set "MIPS_ISA_LEVEL" \n"
463 "1: lld %1, %2 # atomic64_sub_if_positive\n" 463 "1: lld %1, %2 # atomic64_sub_if_positive\n"
464 " dsubu %0, %1, %3 \n" 464 " dsubu %0, %1, %3 \n"
465 " bltz %0, 1f \n" 465 " bltz %0, 1f \n"
@@ -471,7 +471,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
471 "1: \n" 471 "1: \n"
472 " .set mips0 \n" 472 " .set mips0 \n"
473 : "=&r" (result), "=&r" (temp), 473 : "=&r" (result), "=&r" (temp),
474 "+" GCC_OFF12_ASM() (v->counter) 474 "+" GCC_OFF_SMALL_ASM() (v->counter)
475 : "Ir" (i)); 475 : "Ir" (i));
476 } else { 476 } else {
477 unsigned long flags; 477 unsigned long flags;
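
The .set arch=r4000 directives hard-wired the assembler to a pre-R6 ISA, which the R6 assembler rejects; switching them to the MIPS_ISA_LEVEL string from <asm/compiler.h> (defined at the end of this diff), and GCC_OFF12_ASM() to GCC_OFF_SMALL_ASM(), keeps one source for the LL/SC loops across r2 and r6. The shape every converted sequence shares, condensed into one sketch:

    /* Sketch: the LL/SC retry loop all the hunks above share. */
    static inline void my_atomic_add(int i, atomic_t *v)
    {
            int temp;

            do {
                    __asm__ __volatile__(
                    "       .set    "MIPS_ISA_LEVEL"                \n"
                    "       ll      %0, %1          # my_atomic_add \n"
                    "       addu    %0, %2                          \n"
                    "       sc      %0, %1                          \n"
                    "       .set    mips0                           \n"
                    : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)
                    : "Ir" (i));
            } while (unlikely(!temp));  /* sc wrote 0: lost the race, retry */
    }
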
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 6663bcca9d0c..9f935f6aa996 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -79,28 +79,28 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
79 " " __SC "%0, %1 \n" 79 " " __SC "%0, %1 \n"
80 " beqzl %0, 1b \n" 80 " beqzl %0, 1b \n"
81 " .set mips0 \n" 81 " .set mips0 \n"
82 : "=&r" (temp), "=" GCC_OFF12_ASM() (*m) 82 : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
83 : "ir" (1UL << bit), GCC_OFF12_ASM() (*m)); 83 : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
84#ifdef CONFIG_CPU_MIPSR2 84#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
85 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { 85 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
86 do { 86 do {
87 __asm__ __volatile__( 87 __asm__ __volatile__(
88 " " __LL "%0, %1 # set_bit \n" 88 " " __LL "%0, %1 # set_bit \n"
89 " " __INS "%0, %3, %2, 1 \n" 89 " " __INS "%0, %3, %2, 1 \n"
90 " " __SC "%0, %1 \n" 90 " " __SC "%0, %1 \n"
91 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 91 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
92 : "ir" (bit), "r" (~0)); 92 : "ir" (bit), "r" (~0));
93 } while (unlikely(!temp)); 93 } while (unlikely(!temp));
94#endif /* CONFIG_CPU_MIPSR2 */ 94#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
95 } else if (kernel_uses_llsc) { 95 } else if (kernel_uses_llsc) {
96 do { 96 do {
97 __asm__ __volatile__( 97 __asm__ __volatile__(
98 " .set arch=r4000 \n" 98 " .set "MIPS_ISA_ARCH_LEVEL" \n"
99 " " __LL "%0, %1 # set_bit \n" 99 " " __LL "%0, %1 # set_bit \n"
100 " or %0, %2 \n" 100 " or %0, %2 \n"
101 " " __SC "%0, %1 \n" 101 " " __SC "%0, %1 \n"
102 " .set mips0 \n" 102 " .set mips0 \n"
103 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 103 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
104 : "ir" (1UL << bit)); 104 : "ir" (1UL << bit));
105 } while (unlikely(!temp)); 105 } while (unlikely(!temp));
106 } else 106 } else
@@ -131,28 +131,28 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
131 " " __SC "%0, %1 \n" 131 " " __SC "%0, %1 \n"
132 " beqzl %0, 1b \n" 132 " beqzl %0, 1b \n"
133 " .set mips0 \n" 133 " .set mips0 \n"
134 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 134 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
135 : "ir" (~(1UL << bit))); 135 : "ir" (~(1UL << bit)));
136#ifdef CONFIG_CPU_MIPSR2 136#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
137 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { 137 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
138 do { 138 do {
139 __asm__ __volatile__( 139 __asm__ __volatile__(
140 " " __LL "%0, %1 # clear_bit \n" 140 " " __LL "%0, %1 # clear_bit \n"
141 " " __INS "%0, $0, %2, 1 \n" 141 " " __INS "%0, $0, %2, 1 \n"
142 " " __SC "%0, %1 \n" 142 " " __SC "%0, %1 \n"
143 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 143 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
144 : "ir" (bit)); 144 : "ir" (bit));
145 } while (unlikely(!temp)); 145 } while (unlikely(!temp));
146#endif /* CONFIG_CPU_MIPSR2 */ 146#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
147 } else if (kernel_uses_llsc) { 147 } else if (kernel_uses_llsc) {
148 do { 148 do {
149 __asm__ __volatile__( 149 __asm__ __volatile__(
150 " .set arch=r4000 \n" 150 " .set "MIPS_ISA_ARCH_LEVEL" \n"
151 " " __LL "%0, %1 # clear_bit \n" 151 " " __LL "%0, %1 # clear_bit \n"
152 " and %0, %2 \n" 152 " and %0, %2 \n"
153 " " __SC "%0, %1 \n" 153 " " __SC "%0, %1 \n"
154 " .set mips0 \n" 154 " .set mips0 \n"
155 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 155 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
156 : "ir" (~(1UL << bit))); 156 : "ir" (~(1UL << bit)));
157 } while (unlikely(!temp)); 157 } while (unlikely(!temp));
158 } else 158 } else
@@ -197,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
197 " " __SC "%0, %1 \n" 197 " " __SC "%0, %1 \n"
198 " beqzl %0, 1b \n" 198 " beqzl %0, 1b \n"
199 " .set mips0 \n" 199 " .set mips0 \n"
200 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 200 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
201 : "ir" (1UL << bit)); 201 : "ir" (1UL << bit));
202 } else if (kernel_uses_llsc) { 202 } else if (kernel_uses_llsc) {
203 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); 203 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -205,12 +205,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
205 205
206 do { 206 do {
207 __asm__ __volatile__( 207 __asm__ __volatile__(
208 " .set arch=r4000 \n" 208 " .set "MIPS_ISA_ARCH_LEVEL" \n"
209 " " __LL "%0, %1 # change_bit \n" 209 " " __LL "%0, %1 # change_bit \n"
210 " xor %0, %2 \n" 210 " xor %0, %2 \n"
211 " " __SC "%0, %1 \n" 211 " " __SC "%0, %1 \n"
212 " .set mips0 \n" 212 " .set mips0 \n"
213 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 213 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
214 : "ir" (1UL << bit)); 214 : "ir" (1UL << bit));
215 } while (unlikely(!temp)); 215 } while (unlikely(!temp));
216 } else 216 } else
@@ -245,7 +245,7 @@ static inline int test_and_set_bit(unsigned long nr,
245 " beqzl %2, 1b \n" 245 " beqzl %2, 1b \n"
246 " and %2, %0, %3 \n" 246 " and %2, %0, %3 \n"
247 " .set mips0 \n" 247 " .set mips0 \n"
248 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 248 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
249 : "r" (1UL << bit) 249 : "r" (1UL << bit)
250 : "memory"); 250 : "memory");
251 } else if (kernel_uses_llsc) { 251 } else if (kernel_uses_llsc) {
@@ -254,12 +254,12 @@ static inline int test_and_set_bit(unsigned long nr,
254 254
255 do { 255 do {
256 __asm__ __volatile__( 256 __asm__ __volatile__(
257 " .set arch=r4000 \n" 257 " .set "MIPS_ISA_ARCH_LEVEL" \n"
258 " " __LL "%0, %1 # test_and_set_bit \n" 258 " " __LL "%0, %1 # test_and_set_bit \n"
259 " or %2, %0, %3 \n" 259 " or %2, %0, %3 \n"
260 " " __SC "%2, %1 \n" 260 " " __SC "%2, %1 \n"
261 " .set mips0 \n" 261 " .set mips0 \n"
262 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 262 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
263 : "r" (1UL << bit) 263 : "r" (1UL << bit)
264 : "memory"); 264 : "memory");
265 } while (unlikely(!res)); 265 } while (unlikely(!res));
@@ -308,12 +308,12 @@ static inline int test_and_set_bit_lock(unsigned long nr,
308 308
309 do { 309 do {
310 __asm__ __volatile__( 310 __asm__ __volatile__(
311 " .set arch=r4000 \n" 311 " .set "MIPS_ISA_ARCH_LEVEL" \n"
312 " " __LL "%0, %1 # test_and_set_bit \n" 312 " " __LL "%0, %1 # test_and_set_bit \n"
313 " or %2, %0, %3 \n" 313 " or %2, %0, %3 \n"
314 " " __SC "%2, %1 \n" 314 " " __SC "%2, %1 \n"
315 " .set mips0 \n" 315 " .set mips0 \n"
316 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 316 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
317 : "r" (1UL << bit) 317 : "r" (1UL << bit)
318 : "memory"); 318 : "memory");
319 } while (unlikely(!res)); 319 } while (unlikely(!res));
@@ -355,10 +355,10 @@ static inline int test_and_clear_bit(unsigned long nr,
355 " beqzl %2, 1b \n" 355 " beqzl %2, 1b \n"
356 " and %2, %0, %3 \n" 356 " and %2, %0, %3 \n"
357 " .set mips0 \n" 357 " .set mips0 \n"
358 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 358 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
359 : "r" (1UL << bit) 359 : "r" (1UL << bit)
360 : "memory"); 360 : "memory");
361#ifdef CONFIG_CPU_MIPSR2 361#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
362 } else if (kernel_uses_llsc && __builtin_constant_p(nr)) { 362 } else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
363 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); 363 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
364 unsigned long temp; 364 unsigned long temp;
@@ -369,7 +369,7 @@ static inline int test_and_clear_bit(unsigned long nr,
369 " " __EXT "%2, %0, %3, 1 \n" 369 " " __EXT "%2, %0, %3, 1 \n"
370 " " __INS "%0, $0, %3, 1 \n" 370 " " __INS "%0, $0, %3, 1 \n"
371 " " __SC "%0, %1 \n" 371 " " __SC "%0, %1 \n"
372 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 372 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
373 : "ir" (bit) 373 : "ir" (bit)
374 : "memory"); 374 : "memory");
375 } while (unlikely(!temp)); 375 } while (unlikely(!temp));
@@ -380,13 +380,13 @@ static inline int test_and_clear_bit(unsigned long nr,
380 380
381 do { 381 do {
382 __asm__ __volatile__( 382 __asm__ __volatile__(
383 " .set arch=r4000 \n" 383 " .set "MIPS_ISA_ARCH_LEVEL" \n"
384 " " __LL "%0, %1 # test_and_clear_bit \n" 384 " " __LL "%0, %1 # test_and_clear_bit \n"
385 " or %2, %0, %3 \n" 385 " or %2, %0, %3 \n"
386 " xor %2, %3 \n" 386 " xor %2, %3 \n"
387 " " __SC "%2, %1 \n" 387 " " __SC "%2, %1 \n"
388 " .set mips0 \n" 388 " .set mips0 \n"
389 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 389 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
390 : "r" (1UL << bit) 390 : "r" (1UL << bit)
391 : "memory"); 391 : "memory");
392 } while (unlikely(!res)); 392 } while (unlikely(!res));
@@ -428,7 +428,7 @@ static inline int test_and_change_bit(unsigned long nr,
428 " beqzl %2, 1b \n" 428 " beqzl %2, 1b \n"
429 " and %2, %0, %3 \n" 429 " and %2, %0, %3 \n"
430 " .set mips0 \n" 430 " .set mips0 \n"
431 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 431 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
432 : "r" (1UL << bit) 432 : "r" (1UL << bit)
433 : "memory"); 433 : "memory");
434 } else if (kernel_uses_llsc) { 434 } else if (kernel_uses_llsc) {
@@ -437,12 +437,12 @@ static inline int test_and_change_bit(unsigned long nr,
437 437
438 do { 438 do {
439 __asm__ __volatile__( 439 __asm__ __volatile__(
440 " .set arch=r4000 \n" 440 " .set "MIPS_ISA_ARCH_LEVEL" \n"
441 " " __LL "%0, %1 # test_and_change_bit \n" 441 " " __LL "%0, %1 # test_and_change_bit \n"
442 " xor %2, %0, %3 \n" 442 " xor %2, %0, %3 \n"
443 " " __SC "\t%2, %1 \n" 443 " " __SC "\t%2, %1 \n"
444 " .set mips0 \n" 444 " .set mips0 \n"
445 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 445 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
446 : "r" (1UL << bit) 446 : "r" (1UL << bit)
447 : "memory"); 447 : "memory");
448 } while (unlikely(!res)); 448 } while (unlikely(!res));
@@ -485,7 +485,7 @@ static inline unsigned long __fls(unsigned long word)
485 __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { 485 __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
486 __asm__( 486 __asm__(
487 " .set push \n" 487 " .set push \n"
488 " .set mips32 \n" 488 " .set "MIPS_ISA_LEVEL" \n"
489 " clz %0, %1 \n" 489 " clz %0, %1 \n"
490 " .set pop \n" 490 " .set pop \n"
491 : "=r" (num) 491 : "=r" (num)
@@ -498,7 +498,7 @@ static inline unsigned long __fls(unsigned long word)
498 __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) { 498 __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
499 __asm__( 499 __asm__(
500 " .set push \n" 500 " .set push \n"
501 " .set mips64 \n" 501 " .set "MIPS_ISA_LEVEL" \n"
502 " dclz %0, %1 \n" 502 " dclz %0, %1 \n"
503 " .set pop \n" 503 " .set pop \n"
504 : "=r" (num) 504 : "=r" (num)
@@ -562,7 +562,7 @@ static inline int fls(int x)
562 if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { 562 if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
563 __asm__( 563 __asm__(
564 " .set push \n" 564 " .set push \n"
565 " .set mips32 \n" 565 " .set "MIPS_ISA_LEVEL" \n"
566 " clz %0, %1 \n" 566 " clz %0, %1 \n"
567 " .set pop \n" 567 " .set pop \n"
568 : "=r" (x) 568 : "=r" (x)
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index 3418c51e1151..5c585c5c1c3e 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -12,6 +12,10 @@
12#ifndef _ASM_CHECKSUM_H 12#ifndef _ASM_CHECKSUM_H
13#define _ASM_CHECKSUM_H 13#define _ASM_CHECKSUM_H
14 14
15#ifdef CONFIG_GENERIC_CSUM
16#include <asm-generic/checksum.h>
17#else
18
15#include <linux/in6.h> 19#include <linux/in6.h>
16 20
17#include <asm/uaccess.h> 21#include <asm/uaccess.h>
@@ -99,27 +103,23 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
99 */ 103 */
100__wsum csum_partial_copy_nocheck(const void *src, void *dst, 104__wsum csum_partial_copy_nocheck(const void *src, void *dst,
101 int len, __wsum sum); 105 int len, __wsum sum);
106#define csum_partial_copy_nocheck csum_partial_copy_nocheck
102 107
103/* 108/*
104 * Fold a partial checksum without adding pseudo headers 109 * Fold a partial checksum without adding pseudo headers
105 */ 110 */
106static inline __sum16 csum_fold(__wsum sum) 111static inline __sum16 csum_fold(__wsum csum)
107{ 112{
108	__asm__(	113	u32 sum = (__force u32)csum;
109 " .set push # csum_fold\n"
110 " .set noat \n"
111 " sll $1, %0, 16 \n"
112 " addu %0, $1 \n"
113 " sltu $1, %0, $1 \n"
114 " srl %0, %0, 16 \n"
115 " addu %0, $1 \n"
116 " xori %0, 0xffff \n"
117 " .set pop"
118 : "=r" (sum)
119 : "0" (sum));
120 114
121 return (__force __sum16)sum; 115 sum += (sum << 16);
116 csum = (sum < csum);
117 sum >>= 16;
118 sum += csum;
119
120 return (__force __sum16)~sum;
122} 121}
122#define csum_fold csum_fold
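
The asm csum_fold() becomes portable C with identical semantics: add the high half into the low half, feed the end-around carry back in, then complement. (Defining csum_fold as a macro afterwards is what keeps the newly included asm-generic header from overriding the arch version.) A standalone check of the folding arithmetic:

    /* Standalone demo of the end-around-carry fold used above. */
    #include <assert.h>
    #include <stdint.h>

    static uint16_t fold(uint32_t csum)
    {
            uint32_t sum = csum;

            sum += sum << 16;       /* high half += low half ...        */
            csum = sum < csum;      /* ... capturing the 32-bit carry   */
            sum >>= 16;
            sum += csum;            /* end-around carry                 */
            return (uint16_t)~sum;
    }

    int main(void)
    {
            /* 0x0001ffff: 0xffff + 0x0001 wraps to 0x0000 carry 1,
             * fold gives 0x0001, complement 0xfffe. */
            assert(fold(0x0001ffffu) == 0xfffe);
            assert(fold(0) == 0xffff);
            return 0;
    }
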
123 123
124/* 124/*
125 * This is a version of ip_compute_csum() optimized for IP headers, 125 * This is a version of ip_compute_csum() optimized for IP headers,
@@ -158,6 +158,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
158 158
159 return csum_fold(csum); 159 return csum_fold(csum);
160} 160}
161#define ip_fast_csum ip_fast_csum
161 162
162static inline __wsum csum_tcpudp_nofold(__be32 saddr, 163static inline __wsum csum_tcpudp_nofold(__be32 saddr,
163 __be32 daddr, unsigned short len, unsigned short proto, 164 __be32 daddr, unsigned short len, unsigned short proto,
@@ -200,18 +201,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr,
200 201
201 return sum; 202 return sum;
202} 203}
203 204#define csum_tcpudp_nofold csum_tcpudp_nofold
204/*
205 * computes the checksum of the TCP/UDP pseudo-header
206 * returns a 16-bit checksum, already complemented
207 */
208static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
209 unsigned short len,
210 unsigned short proto,
211 __wsum sum)
212{
213 return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
214}
215 205
216/* 206/*
217 * this routine is used for miscellaneous IP-like checksums, mainly 207 * this routine is used for miscellaneous IP-like checksums, mainly
@@ -287,4 +277,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
287 return csum_fold(sum); 277 return csum_fold(sum);
288} 278}
289 279
280#include <asm-generic/checksum.h>
281#endif /* CONFIG_GENERIC_CSUM */
282
290#endif /* _ASM_CHECKSUM_H */ 283#endif /* _ASM_CHECKSUM_H */
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 28b1edf19501..d0a2a68ca600 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -31,24 +31,24 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
31 " sc %2, %1 \n" 31 " sc %2, %1 \n"
32 " beqzl %2, 1b \n" 32 " beqzl %2, 1b \n"
33 " .set mips0 \n" 33 " .set mips0 \n"
34 : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy) 34 : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
35 : GCC_OFF12_ASM() (*m), "Jr" (val) 35 : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
36 : "memory"); 36 : "memory");
37 } else if (kernel_uses_llsc) { 37 } else if (kernel_uses_llsc) {
38 unsigned long dummy; 38 unsigned long dummy;
39 39
40 do { 40 do {
41 __asm__ __volatile__( 41 __asm__ __volatile__(
42 " .set arch=r4000 \n" 42 " .set "MIPS_ISA_ARCH_LEVEL" \n"
43 " ll %0, %3 # xchg_u32 \n" 43 " ll %0, %3 # xchg_u32 \n"
44 " .set mips0 \n" 44 " .set mips0 \n"
45 " move %2, %z4 \n" 45 " move %2, %z4 \n"
46 " .set arch=r4000 \n" 46 " .set "MIPS_ISA_ARCH_LEVEL" \n"
47 " sc %2, %1 \n" 47 " sc %2, %1 \n"
48 " .set mips0 \n" 48 " .set mips0 \n"
49 : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), 49 : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
50 "=&r" (dummy) 50 "=&r" (dummy)
51 : GCC_OFF12_ASM() (*m), "Jr" (val) 51 : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
52 : "memory"); 52 : "memory");
53 } while (unlikely(!dummy)); 53 } while (unlikely(!dummy));
54 } else { 54 } else {
@@ -82,22 +82,22 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
82 " scd %2, %1 \n" 82 " scd %2, %1 \n"
83 " beqzl %2, 1b \n" 83 " beqzl %2, 1b \n"
84 " .set mips0 \n" 84 " .set mips0 \n"
85 : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy) 85 : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
86 : GCC_OFF12_ASM() (*m), "Jr" (val) 86 : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
87 : "memory"); 87 : "memory");
88 } else if (kernel_uses_llsc) { 88 } else if (kernel_uses_llsc) {
89 unsigned long dummy; 89 unsigned long dummy;
90 90
91 do { 91 do {
92 __asm__ __volatile__( 92 __asm__ __volatile__(
93 " .set arch=r4000 \n" 93 " .set "MIPS_ISA_ARCH_LEVEL" \n"
94 " lld %0, %3 # xchg_u64 \n" 94 " lld %0, %3 # xchg_u64 \n"
95 " move %2, %z4 \n" 95 " move %2, %z4 \n"
96 " scd %2, %1 \n" 96 " scd %2, %1 \n"
97 " .set mips0 \n" 97 " .set mips0 \n"
98 : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), 98 : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
99 "=&r" (dummy) 99 "=&r" (dummy)
100 : GCC_OFF12_ASM() (*m), "Jr" (val) 100 : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
101 : "memory"); 101 : "memory");
102 } while (unlikely(!dummy)); 102 } while (unlikely(!dummy));
103 } else { 103 } else {
@@ -158,25 +158,25 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
158 " beqzl $1, 1b \n" \ 158 " beqzl $1, 1b \n" \
159 "2: \n" \ 159 "2: \n" \
160 " .set pop \n" \ 160 " .set pop \n" \
161 : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \ 161 : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
162 : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \ 162 : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
163 : "memory"); \ 163 : "memory"); \
164 } else if (kernel_uses_llsc) { \ 164 } else if (kernel_uses_llsc) { \
165 __asm__ __volatile__( \ 165 __asm__ __volatile__( \
166 " .set push \n" \ 166 " .set push \n" \
167 " .set noat \n" \ 167 " .set noat \n" \
168 " .set arch=r4000 \n" \ 168 " .set "MIPS_ISA_ARCH_LEVEL" \n" \
169 "1: " ld " %0, %2 # __cmpxchg_asm \n" \ 169 "1: " ld " %0, %2 # __cmpxchg_asm \n" \
170 " bne %0, %z3, 2f \n" \ 170 " bne %0, %z3, 2f \n" \
171 " .set mips0 \n" \ 171 " .set mips0 \n" \
172 " move $1, %z4 \n" \ 172 " move $1, %z4 \n" \
173 " .set arch=r4000 \n" \ 173 " .set "MIPS_ISA_ARCH_LEVEL" \n" \
174 " " st " $1, %1 \n" \ 174 " " st " $1, %1 \n" \
175 " beqz $1, 1b \n" \ 175 " beqz $1, 1b \n" \
176 " .set pop \n" \ 176 " .set pop \n" \
177 "2: \n" \ 177 "2: \n" \
178 : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \ 178 : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
179 : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \ 179 : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
180 : "memory"); \ 180 : "memory"); \
181 } else { \ 181 } else { \
182 unsigned long __flags; \ 182 unsigned long __flags; \
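
All the hunks in this file only retarget the ".set arch=" directive inside the ll/sc retry loops to MIPS_ISA_ARCH_LEVEL and rename the memory-operand constraint macro; the loop structure is untouched. For reference, the retry semantics those loops implement, expressed as a user-space sketch with C11 atomics rather than MIPS inline assembly:

#include <stdatomic.h>
#include <stdio.h>

static unsigned long xchg_u32_like(_Atomic unsigned long *m,
				   unsigned long val)
{
	unsigned long old = atomic_load(m);

	/* A failed store retries the whole loop, just like a failed
	 * sc followed by "beqz $1, 1b". */
	while (!atomic_compare_exchange_weak(m, &old, val))
		;
	return old;
}

int main(void)
{
	_Atomic unsigned long v = 5;
	unsigned long old = xchg_u32_like(&v, 9);

	printf("old=%lu new=%lu\n", old, atomic_load(&v));
	return 0;
}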
diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h
index c73815e0123a..e081a265f422 100644
--- a/arch/mips/include/asm/compiler.h
+++ b/arch/mips/include/asm/compiler.h
@@ -16,12 +16,30 @@
16#define GCC_REG_ACCUM "accum" 16#define GCC_REG_ACCUM "accum"
17#endif 17#endif
18 18
19#ifdef CONFIG_CPU_MIPSR6
20/* All MIPS R6 toolchains support the ZC constraint */
21#define GCC_OFF_SMALL_ASM() "ZC"
22#else
19#ifndef CONFIG_CPU_MICROMIPS 23#ifndef CONFIG_CPU_MICROMIPS
20#define GCC_OFF12_ASM() "R" 24#define GCC_OFF_SMALL_ASM() "R"
21#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9) 25#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
22#define GCC_OFF12_ASM() "ZC" 26#define GCC_OFF_SMALL_ASM() "ZC"
23#else 27#else
24#error "microMIPS compilation unsupported with GCC older than 4.9" 28#error "microMIPS compilation unsupported with GCC older than 4.9"
25#endif 29#endif /* CONFIG_CPU_MICROMIPS */
30#endif /* CONFIG_CPU_MIPSR6 */
31
32#ifdef CONFIG_CPU_MIPSR6
33#define MIPS_ISA_LEVEL "mips64r6"
34#define MIPS_ISA_ARCH_LEVEL MIPS_ISA_LEVEL
35#define MIPS_ISA_LEVEL_RAW mips64r6
36#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
37#else
38/* MIPS64 is a superset of MIPS32 */
39#define MIPS_ISA_LEVEL "mips64r2"
40#define MIPS_ISA_ARCH_LEVEL "arch=r4000"
41#define MIPS_ISA_LEVEL_RAW mips64r2
42#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
43#endif /* CONFIG_CPU_MIPSR6 */
26 44
27#endif /* _ASM_COMPILER_H */ 45#endif /* _ASM_COMPILER_H */
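
The MIPS_ISA_LEVEL/MIPS_ISA_ARCH_LEVEL macros are string literals spliced into __asm__ templates by adjacent-literal concatenation, which is what lets the callers elsewhere in this merge write ".set "MIPS_ISA_ARCH_LEVEL". A host-side sketch of the mechanism, using the values from the !CONFIG_CPU_MIPSR6 branch above:

#include <stdio.h>

#define MIPS_ISA_LEVEL      "mips64r2"
#define MIPS_ISA_ARCH_LEVEL "arch=r4000"

int main(void)
{
	/* Adjacent string literals merge into one, exactly as in the
	 * __asm__ bodies that use these macros. */
	puts("\t.set\t" MIPS_ISA_ARCH_LEVEL "\t\n");
	puts("\t.set\t" MIPS_ISA_LEVEL "\t\n");
	return 0;
}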
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 2897cfafcaf0..0d8208de9a3f 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -38,6 +38,9 @@
38#ifndef cpu_has_maar 38#ifndef cpu_has_maar
39#define cpu_has_maar (cpu_data[0].options & MIPS_CPU_MAAR) 39#define cpu_has_maar (cpu_data[0].options & MIPS_CPU_MAAR)
40#endif 40#endif
41#ifndef cpu_has_rw_llb
42#define cpu_has_rw_llb (cpu_data[0].options & MIPS_CPU_RW_LLB)
43#endif
41 44
42/* 45/*
43 * For the moment we don't consider R6000 and R8000 so we can assume that 46 * For the moment we don't consider R6000 and R8000 so we can assume that
@@ -171,6 +174,9 @@
171#endif 174#endif
172#endif 175#endif
173 176
177#ifndef cpu_has_mips_1
178# define cpu_has_mips_1 (!cpu_has_mips_r6)
179#endif
174#ifndef cpu_has_mips_2 180#ifndef cpu_has_mips_2
175# define cpu_has_mips_2 (cpu_data[0].isa_level & MIPS_CPU_ISA_II) 181# define cpu_has_mips_2 (cpu_data[0].isa_level & MIPS_CPU_ISA_II)
176#endif 182#endif
@@ -189,12 +195,18 @@
189#ifndef cpu_has_mips32r2 195#ifndef cpu_has_mips32r2
190# define cpu_has_mips32r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2) 196# define cpu_has_mips32r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2)
191#endif 197#endif
198#ifndef cpu_has_mips32r6
199# define cpu_has_mips32r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R6)
200#endif
192#ifndef cpu_has_mips64r1 201#ifndef cpu_has_mips64r1
193# define cpu_has_mips64r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1) 202# define cpu_has_mips64r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1)
194#endif 203#endif
195#ifndef cpu_has_mips64r2 204#ifndef cpu_has_mips64r2
196# define cpu_has_mips64r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2) 205# define cpu_has_mips64r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2)
197#endif 206#endif
207#ifndef cpu_has_mips64r6
208# define cpu_has_mips64r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R6)
209#endif
198 210
199/* 211/*
200 * Shortcuts ... 212 * Shortcuts ...
@@ -208,17 +220,23 @@
208#define cpu_has_mips_4_5_r (cpu_has_mips_4 | cpu_has_mips_5_r) 220#define cpu_has_mips_4_5_r (cpu_has_mips_4 | cpu_has_mips_5_r)
209#define cpu_has_mips_5_r (cpu_has_mips_5 | cpu_has_mips_r) 221#define cpu_has_mips_5_r (cpu_has_mips_5 | cpu_has_mips_r)
210 222
211#define cpu_has_mips_4_5_r2 (cpu_has_mips_4_5 | cpu_has_mips_r2) 223#define cpu_has_mips_4_5_r2_r6 (cpu_has_mips_4_5 | cpu_has_mips_r2 | \
224 cpu_has_mips_r6)
212 225
213#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2) 226#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2 | cpu_has_mips32r6)
214#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2) 227#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2 | cpu_has_mips64r6)
215#define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1) 228#define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
216#define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2) 229#define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2)
230#define cpu_has_mips_r6 (cpu_has_mips32r6 | cpu_has_mips64r6)
217#define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \ 231#define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \
218 cpu_has_mips64r1 | cpu_has_mips64r2) 232 cpu_has_mips32r6 | cpu_has_mips64r1 | \
233 cpu_has_mips64r2 | cpu_has_mips64r6)
234
235/* MIPSR2 and MIPSR6 have a lot of similarities */
236#define cpu_has_mips_r2_r6 (cpu_has_mips_r2 | cpu_has_mips_r6)
219 237
220#ifndef cpu_has_mips_r2_exec_hazard 238#ifndef cpu_has_mips_r2_exec_hazard
221#define cpu_has_mips_r2_exec_hazard cpu_has_mips_r2 239#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6)
222#endif 240#endif
223 241
224/* 242/*
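
The new cpu_has_mips32r6/cpu_has_mips64r6 probes and the widened shortcut macros are plain bitmask tests over cpu_data[0].isa_level, OR-ed together. A user-space sketch of that decomposition; the flag values mirror the MIPS_CPU_ISA_* additions to asm/cpu.h later in this diff:

#include <stdio.h>

#define ISA_M32R2 0x00000020	/* MIPS_CPU_ISA_M32R2 */
#define ISA_M32R6 0x00000100	/* MIPS_CPU_ISA_M32R6 */
#define ISA_M64R6 0x00000200	/* MIPS_CPU_ISA_M64R6 */

struct fake_cpuinfo { unsigned int isa_level; };

int main(void)
{
	struct fake_cpuinfo cpu = { .isa_level = ISA_M64R6 };
	int has_m32r6 = !!(cpu.isa_level & ISA_M32R6);
	int has_m64r6 = !!(cpu.isa_level & ISA_M64R6);
	int has_r6 = has_m32r6 | has_m64r6;	/* cpu_has_mips_r6 */

	printf("r6=%d\n", has_r6);
	return 0;
}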
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index a6c9ccb33c5c..c3f4f2d2e108 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -84,6 +84,11 @@ struct cpuinfo_mips {
84 * (shifted by _CACHE_SHIFT) 84 * (shifted by _CACHE_SHIFT)
85 */ 85 */
86 unsigned int writecombine; 86 unsigned int writecombine;
87 /*
88 * Simple counter to prevent enabling HTW in nested
89 * htw_start/htw_stop calls
90 */
91 unsigned int htw_seq;
87} __attribute__((aligned(SMP_CACHE_BYTES))); 92} __attribute__((aligned(SMP_CACHE_BYTES)));
88 93
89extern struct cpuinfo_mips cpu_data[]; 94extern struct cpuinfo_mips cpu_data[];
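
The new htw_seq field turns htw_start()/htw_stop() into a nestable pair: only the transition between zero and non-zero actually touches the hardware page-table walker. A sketch of that counting discipline, with hypothetical stand-ins for the real helpers:

#include <stdio.h>

static unsigned int htw_seq;

static void htw_stop(void)  { if (htw_seq++ == 0) puts("HTW off"); }
static void htw_start(void) { if (--htw_seq == 0) puts("HTW on");  }

int main(void)
{
	htw_stop();	/* outermost: really disables */
	htw_stop();	/* nested: only counted */
	htw_start();	/* nested: only counted */
	htw_start();	/* outermost: really re-enables */
	return 0;
}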
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index b4e2bd87df50..8245875f8b33 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -54,6 +54,13 @@ static inline int __pure __get_cpu_type(const int cpu_type)
54 case CPU_M5150: 54 case CPU_M5150:
55#endif 55#endif
56 56
57#if defined(CONFIG_SYS_HAS_CPU_MIPS32_R2) || \
58 defined(CONFIG_SYS_HAS_CPU_MIPS32_R6) || \
59 defined(CONFIG_SYS_HAS_CPU_MIPS64_R2) || \
60 defined(CONFIG_SYS_HAS_CPU_MIPS64_R6)
61 case CPU_QEMU_GENERIC:
62#endif
63
57#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1 64#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1
58 case CPU_5KC: 65 case CPU_5KC:
59 case CPU_5KE: 66 case CPU_5KE:
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 33866fce4d63..15687234d70a 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -93,6 +93,7 @@
93 * These are the PRID's for when 23:16 == PRID_COMP_MIPS 93 * These are the PRID's for when 23:16 == PRID_COMP_MIPS
94 */ 94 */
95 95
96#define PRID_IMP_QEMU_GENERIC 0x0000
96#define PRID_IMP_4KC 0x8000 97#define PRID_IMP_4KC 0x8000
97#define PRID_IMP_5KC 0x8100 98#define PRID_IMP_5KC 0x8100
98#define PRID_IMP_20KC 0x8200 99#define PRID_IMP_20KC 0x8200
@@ -312,6 +313,8 @@ enum cpu_type_enum {
312 CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, 313 CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
313 CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP, 314 CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,
314 315
316 CPU_QEMU_GENERIC,
317
315 CPU_LAST 318 CPU_LAST
316}; 319};
317 320
@@ -329,11 +332,14 @@ enum cpu_type_enum {
329#define MIPS_CPU_ISA_M32R2 0x00000020 332#define MIPS_CPU_ISA_M32R2 0x00000020
330#define MIPS_CPU_ISA_M64R1 0x00000040 333#define MIPS_CPU_ISA_M64R1 0x00000040
331#define MIPS_CPU_ISA_M64R2 0x00000080 334#define MIPS_CPU_ISA_M64R2 0x00000080
335#define MIPS_CPU_ISA_M32R6 0x00000100
336#define MIPS_CPU_ISA_M64R6 0x00000200
332 337
333#define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \ 338#define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \
334 MIPS_CPU_ISA_M32R2) 339 MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M32R6)
335#define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \ 340#define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \
336 MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2) 341 MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 | \
342 MIPS_CPU_ISA_M64R6)
337 343
338/* 344/*
339 * CPU Option encodings 345 * CPU Option encodings
@@ -370,6 +376,7 @@ enum cpu_type_enum {
370#define MIPS_CPU_RIXIEX 0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */ 376#define MIPS_CPU_RIXIEX 0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */
371#define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */ 377#define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */
372#define MIPS_CPU_FRE 0x800000000ull /* FRE & UFE bits implemented */ 378#define MIPS_CPU_FRE 0x800000000ull /* FRE & UFE bits implemented */
379#define MIPS_CPU_RW_LLB 0x1000000000ull /* LLADDR/LLB writes are allowed */
373 380
374/* 381/*
375 * CPU ASE encodings 382 * CPU ASE encodings
diff --git a/arch/mips/include/asm/edac.h b/arch/mips/include/asm/edac.h
index ae6fedcb0060..94105d3f58f4 100644
--- a/arch/mips/include/asm/edac.h
+++ b/arch/mips/include/asm/edac.h
@@ -26,8 +26,8 @@ static inline void atomic_scrub(void *va, u32 size)
26 " sc %0, %1 \n" 26 " sc %0, %1 \n"
27 " beqz %0, 1b \n" 27 " beqz %0, 1b \n"
28 " .set mips0 \n" 28 " .set mips0 \n"
29 : "=&r" (temp), "=" GCC_OFF12_ASM() (*virt_addr) 29 : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*virt_addr)
30 : GCC_OFF12_ASM() (*virt_addr)); 30 : GCC_OFF_SMALL_ASM() (*virt_addr));
31 31
32 virt_addr++; 32 virt_addr++;
33 } 33 }
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index eb4d95de619c..535f196ffe02 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -417,13 +417,15 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
417struct arch_elf_state { 417struct arch_elf_state {
418 int fp_abi; 418 int fp_abi;
419 int interp_fp_abi; 419 int interp_fp_abi;
420 int overall_abi; 420 int overall_fp_mode;
421}; 421};
422 422
423#define MIPS_ABI_FP_UNKNOWN (-1) /* Unknown FP ABI (kernel internal) */
424
423#define INIT_ARCH_ELF_STATE { \ 425#define INIT_ARCH_ELF_STATE { \
424 .fp_abi = -1, \ 426 .fp_abi = MIPS_ABI_FP_UNKNOWN, \
425 .interp_fp_abi = -1, \ 427 .interp_fp_abi = MIPS_ABI_FP_UNKNOWN, \
426 .overall_abi = -1, \ 428 .overall_fp_mode = -1, \
427} 429}
428 430
429extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf, 431extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
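
With the rename to overall_fp_mode and the new MIPS_ABI_FP_UNKNOWN sentinel, the initial state reads naturally; a user-space sketch mirroring INIT_ARCH_ELF_STATE (demo only):

#include <stdio.h>

#define MIPS_ABI_FP_UNKNOWN (-1)

struct arch_elf_state {
	int fp_abi;
	int interp_fp_abi;
	int overall_fp_mode;
};

int main(void)
{
	struct arch_elf_state st = {
		.fp_abi          = MIPS_ABI_FP_UNKNOWN,
		.interp_fp_abi   = MIPS_ABI_FP_UNKNOWN,
		.overall_fp_mode = -1,
	};

	printf("fp_abi known: %s\n",
	       st.fp_abi == MIPS_ABI_FP_UNKNOWN ? "no" : "yes");
	return 0;
}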
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index affebb78f5d6..dd083e999b08 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -68,7 +68,8 @@ static inline int __enable_fpu(enum fpu_mode mode)
68 goto fr_common; 68 goto fr_common;
69 69
70 case FPU_64BIT: 70 case FPU_64BIT:
71#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT)) 71#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \
72 || defined(CONFIG_64BIT))
72 /* we only have a 32-bit FPU */ 73 /* we only have a 32-bit FPU */
73 return SIGFPE; 74 return SIGFPE;
74#endif 75#endif
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index ef9987a61d88..1de190bdfb9c 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -45,19 +45,19 @@
45 " "__UA_ADDR "\t2b, 4b \n" \ 45 " "__UA_ADDR "\t2b, 4b \n" \
46 " .previous \n" \ 46 " .previous \n" \
47 : "=r" (ret), "=&r" (oldval), \ 47 : "=r" (ret), "=&r" (oldval), \
48 "=" GCC_OFF12_ASM() (*uaddr) \ 48 "=" GCC_OFF_SMALL_ASM() (*uaddr) \
49 : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \ 49 : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \
50 "i" (-EFAULT) \ 50 "i" (-EFAULT) \
51 : "memory"); \ 51 : "memory"); \
52 } else if (cpu_has_llsc) { \ 52 } else if (cpu_has_llsc) { \
53 __asm__ __volatile__( \ 53 __asm__ __volatile__( \
54 " .set push \n" \ 54 " .set push \n" \
55 " .set noat \n" \ 55 " .set noat \n" \
56 " .set arch=r4000 \n" \ 56 " .set "MIPS_ISA_ARCH_LEVEL" \n" \
57 "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \ 57 "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \
58 " .set mips0 \n" \ 58 " .set mips0 \n" \
59 " " insn " \n" \ 59 " " insn " \n" \
60 " .set arch=r4000 \n" \ 60 " .set "MIPS_ISA_ARCH_LEVEL" \n" \
61 "2: "user_sc("$1", "%2")" \n" \ 61 "2: "user_sc("$1", "%2")" \n" \
62 " beqz $1, 1b \n" \ 62 " beqz $1, 1b \n" \
63 __WEAK_LLSC_MB \ 63 __WEAK_LLSC_MB \
@@ -74,8 +74,8 @@
74 " "__UA_ADDR "\t2b, 4b \n" \ 74 " "__UA_ADDR "\t2b, 4b \n" \
75 " .previous \n" \ 75 " .previous \n" \
76 : "=r" (ret), "=&r" (oldval), \ 76 : "=r" (ret), "=&r" (oldval), \
77 "=" GCC_OFF12_ASM() (*uaddr) \ 77 "=" GCC_OFF_SMALL_ASM() (*uaddr) \
78 : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \ 78 : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \
79 "i" (-EFAULT) \ 79 "i" (-EFAULT) \
80 : "memory"); \ 80 : "memory"); \
81 } else \ 81 } else \
@@ -174,8 +174,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
174 " "__UA_ADDR "\t1b, 4b \n" 174 " "__UA_ADDR "\t1b, 4b \n"
175 " "__UA_ADDR "\t2b, 4b \n" 175 " "__UA_ADDR "\t2b, 4b \n"
176 " .previous \n" 176 " .previous \n"
177 : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr) 177 : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
178 : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), 178 : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
179 "i" (-EFAULT) 179 "i" (-EFAULT)
180 : "memory"); 180 : "memory");
181 } else if (cpu_has_llsc) { 181 } else if (cpu_has_llsc) {
@@ -183,12 +183,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
183 "# futex_atomic_cmpxchg_inatomic \n" 183 "# futex_atomic_cmpxchg_inatomic \n"
184 " .set push \n" 184 " .set push \n"
185 " .set noat \n" 185 " .set noat \n"
186 " .set arch=r4000 \n" 186 " .set "MIPS_ISA_ARCH_LEVEL" \n"
187 "1: "user_ll("%1", "%3")" \n" 187 "1: "user_ll("%1", "%3")" \n"
188 " bne %1, %z4, 3f \n" 188 " bne %1, %z4, 3f \n"
189 " .set mips0 \n" 189 " .set mips0 \n"
190 " move $1, %z5 \n" 190 " move $1, %z5 \n"
191 " .set arch=r4000 \n" 191 " .set "MIPS_ISA_ARCH_LEVEL" \n"
192 "2: "user_sc("$1", "%2")" \n" 192 "2: "user_sc("$1", "%2")" \n"
193 " beqz $1, 1b \n" 193 " beqz $1, 1b \n"
194 __WEAK_LLSC_MB 194 __WEAK_LLSC_MB
@@ -203,8 +203,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
203 " "__UA_ADDR "\t1b, 4b \n" 203 " "__UA_ADDR "\t1b, 4b \n"
204 " "__UA_ADDR "\t2b, 4b \n" 204 " "__UA_ADDR "\t2b, 4b \n"
205 " .previous \n" 205 " .previous \n"
206 : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr) 206 : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
207 : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), 207 : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
208 "i" (-EFAULT) 208 "i" (-EFAULT)
209 : "memory"); 209 : "memory");
210 } else 210 } else
diff --git a/arch/mips/include/asm/gio_device.h b/arch/mips/include/asm/gio_device.h
index 4be1a57cdbb0..71a986e9b694 100644
--- a/arch/mips/include/asm/gio_device.h
+++ b/arch/mips/include/asm/gio_device.h
@@ -25,8 +25,6 @@ struct gio_driver {
25 25
26 int (*probe)(struct gio_device *, const struct gio_device_id *); 26 int (*probe)(struct gio_device *, const struct gio_device_id *);
27 void (*remove)(struct gio_device *); 27 void (*remove)(struct gio_device *);
28 int (*suspend)(struct gio_device *, pm_message_t);
29 int (*resume)(struct gio_device *);
30 void (*shutdown)(struct gio_device *); 28 void (*shutdown)(struct gio_device *);
31 29
32 struct device_driver driver; 30 struct device_driver driver;
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index e3ee92d4dbe7..4087b47ad1cb 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -11,6 +11,7 @@
11#define _ASM_HAZARDS_H 11#define _ASM_HAZARDS_H
12 12
13#include <linux/stringify.h> 13#include <linux/stringify.h>
14#include <asm/compiler.h>
14 15
15#define ___ssnop \ 16#define ___ssnop \
16 sll $0, $0, 1 17 sll $0, $0, 1
@@ -21,7 +22,7 @@
21/* 22/*
22 * TLB hazards 23 * TLB hazards
23 */ 24 */
24#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON) 25#if (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
25 26
26/* 27/*
27 * MIPSR2 defines ehb for hazard avoidance 28 * MIPSR2 and MIPSR6 define ehb for hazard avoidance
@@ -58,7 +59,7 @@ do { \
58 unsigned long tmp; \ 59 unsigned long tmp; \
59 \ 60 \
60 __asm__ __volatile__( \ 61 __asm__ __volatile__( \
61 " .set mips64r2 \n" \ 62 " .set "MIPS_ISA_LEVEL" \n" \
62 " dla %0, 1f \n" \ 63 " dla %0, 1f \n" \
63 " jr.hb %0 \n" \ 64 " jr.hb %0 \n" \
64 " .set mips0 \n" \ 65 " .set mips0 \n" \
@@ -132,7 +133,7 @@ do { \
132 133
133#define instruction_hazard() \ 134#define instruction_hazard() \
134do { \ 135do { \
135 if (cpu_has_mips_r2) \ 136 if (cpu_has_mips_r2_r6) \
136 __instruction_hazard(); \ 137 __instruction_hazard(); \
137} while (0) 138} while (0)
138 139
@@ -240,7 +241,7 @@ do { \
240 241
241#define __disable_fpu_hazard 242#define __disable_fpu_hazard
242 243
243#elif defined(CONFIG_CPU_MIPSR2) 244#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
244 245
245#define __enable_fpu_hazard \ 246#define __enable_fpu_hazard \
246 ___ehb 247 ___ehb
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 0fa5fdcd1f01..d60cc68fa31e 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -15,9 +15,10 @@
15 15
16#include <linux/compiler.h> 16#include <linux/compiler.h>
17#include <linux/stringify.h> 17#include <linux/stringify.h>
18#include <asm/compiler.h>
18#include <asm/hazards.h> 19#include <asm/hazards.h>
19 20
20#ifdef CONFIG_CPU_MIPSR2 21#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
21 22
22static inline void arch_local_irq_disable(void) 23static inline void arch_local_irq_disable(void)
23{ 24{
@@ -118,7 +119,7 @@ void arch_local_irq_disable(void);
118unsigned long arch_local_irq_save(void); 119unsigned long arch_local_irq_save(void);
119void arch_local_irq_restore(unsigned long flags); 120void arch_local_irq_restore(unsigned long flags);
120void __arch_local_irq_restore(unsigned long flags); 121void __arch_local_irq_restore(unsigned long flags);
121#endif /* CONFIG_CPU_MIPSR2 */ 122#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
122 123
123static inline void arch_local_irq_enable(void) 124static inline void arch_local_irq_enable(void)
124{ 125{
@@ -126,7 +127,7 @@ static inline void arch_local_irq_enable(void)
126 " .set push \n" 127 " .set push \n"
127 " .set reorder \n" 128 " .set reorder \n"
128 " .set noat \n" 129 " .set noat \n"
129#if defined(CONFIG_CPU_MIPSR2) 130#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
130 " ei \n" 131 " ei \n"
131#else 132#else
132 " mfc0 $1,$12 \n" 133 " mfc0 $1,$12 \n"
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index 46dfc3c1fd49..8feaed62a2ab 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -5,6 +5,7 @@
5#include <linux/bitops.h> 5#include <linux/bitops.h>
6#include <linux/atomic.h> 6#include <linux/atomic.h>
7#include <asm/cmpxchg.h> 7#include <asm/cmpxchg.h>
8#include <asm/compiler.h>
8#include <asm/war.h> 9#include <asm/war.h>
9 10
10typedef struct 11typedef struct
@@ -47,7 +48,7 @@ static __inline__ long local_add_return(long i, local_t * l)
47 unsigned long temp; 48 unsigned long temp;
48 49
49 __asm__ __volatile__( 50 __asm__ __volatile__(
50 " .set arch=r4000 \n" 51 " .set "MIPS_ISA_ARCH_LEVEL" \n"
51 "1:" __LL "%1, %2 # local_add_return \n" 52 "1:" __LL "%1, %2 # local_add_return \n"
52 " addu %0, %1, %3 \n" 53 " addu %0, %1, %3 \n"
53 __SC "%0, %2 \n" 54 __SC "%0, %2 \n"
@@ -92,7 +93,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
92 unsigned long temp; 93 unsigned long temp;
93 94
94 __asm__ __volatile__( 95 __asm__ __volatile__(
95 " .set arch=r4000 \n" 96 " .set "MIPS_ISA_ARCH_LEVEL" \n"
96 "1:" __LL "%1, %2 # local_sub_return \n" 97 "1:" __LL "%1, %2 # local_sub_return \n"
97 " subu %0, %1, %3 \n" 98 " subu %0, %1, %3 \n"
98 __SC "%0, %2 \n" 99 __SC "%0, %2 \n"
diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
index 1668ee57acb9..cf92fe733995 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
@@ -8,11 +8,10 @@
8#ifndef __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H 8#ifndef __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H
9#define __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H 9#define __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H
10 10
11
12#define CP0_CYCLE_COUNTER $9, 6
13#define CP0_CVMCTL_REG $9, 7 11#define CP0_CVMCTL_REG $9, 7
14#define CP0_CVMMEMCTL_REG $11,7 12#define CP0_CVMMEMCTL_REG $11,7
15#define CP0_PRID_REG $15, 0 13#define CP0_PRID_REG $15, 0
14#define CP0_DCACHE_ERR_REG $27, 1
16#define CP0_PRID_OCTEON_PASS1 0x000d0000 15#define CP0_PRID_OCTEON_PASS1 0x000d0000
17#define CP0_PRID_OCTEON_CN30XX 0x000d0200 16#define CP0_PRID_OCTEON_CN30XX 0x000d0200
18 17
@@ -38,36 +37,55 @@
38 # Needed for octeon specific memcpy 37 # Needed for octeon specific memcpy
39 or v0, v0, 0x5001 38 or v0, v0, 0x5001
40 xor v0, v0, 0x1001 39 xor v0, v0, 0x1001
41 # Read the processor ID register
42 mfc0 v1, CP0_PRID_REG
43 # Disable instruction prefetching (Octeon Pass1 errata)
44 or v0, v0, 0x2000
45 # Skip reenable of prefetching for Octeon Pass1
46 beq v1, CP0_PRID_OCTEON_PASS1, skip
47 nop
48 # Reenable instruction prefetching, not on Pass1
49 xor v0, v0, 0x2000
50 # Strip off pass number off of processor id
51 srl v1, 8
52 sll v1, 8
53 # CN30XX needs some extra stuff turned off for better performance
54 bne v1, CP0_PRID_OCTEON_CN30XX, skip
55 nop
56 # CN30XX Use random Icache replacement
57 or v0, v0, 0x400
58 # CN30XX Disable instruction prefetching
59 or v0, v0, 0x2000
60skip:
61 # First clear off CvmCtl[IPPCI] bit and move the performance 40 # First clear off CvmCtl[IPPCI] bit and move the performance
62 # counters interrupt to IRQ 6 41 # counters interrupt to IRQ 6
63 li v1, ~(7 << 7) 42 dli v1, ~(7 << 7)
64 and v0, v0, v1 43 and v0, v0, v1
65 ori v0, v0, (6 << 7) 44 ori v0, v0, (6 << 7)
45
46 mfc0 v1, CP0_PRID_REG
47 and t1, v1, 0xfff8
48 xor t1, t1, 0x9000 # 63-P1
49 beqz t1, 4f
50 and t1, v1, 0xfff8
51 xor t1, t1, 0x9008 # 63-P2
52 beqz t1, 4f
53 and t1, v1, 0xfff8
54 xor t1, t1, 0x9100 # 68-P1
55 beqz t1, 4f
56 and t1, v1, 0xff00
57 xor t1, t1, 0x9200 # 66-PX
58 bnez t1, 5f # Skip WAR for others.
59 and t1, v1, 0x00ff
60 slti t1, t1, 2 # 66-P1.2 and later good.
61 beqz t1, 5f
62
634: # core-16057 work around
64 or v0, v0, 0x2000 # Set IPREF bit.
65
665: # No core-16057 work around
66 # Write the cavium control register 67 # Write the cavium control register
67 dmtc0 v0, CP0_CVMCTL_REG 68 dmtc0 v0, CP0_CVMCTL_REG
68 sync 69 sync
69 # Flush dcache after config change 70 # Flush dcache after config change
70 cache 9, 0($0) 71 cache 9, 0($0)
72 # Zero all of CVMSEG to make sure parity is correct
73 dli v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
74 dsll v0, 7
75 beqz v0, 2f
761: dsubu v0, 8
77 sd $0, -32768(v0)
78 bnez v0, 1b
792:
80 mfc0 v0, CP0_PRID_REG
81 bbit0 v0, 15, 1f
82 # OCTEON II or better have bit 15 set. Clear the error bits.
83 and t1, v0, 0xff00
84 dli v0, 0x9500
85 bge t1, v0, 1f # OCTEON III has no DCACHE_ERR_REG COP0
86 dli v0, 0x27
87 dmtc0 v0, CP0_DCACHE_ERR_REG
881:
71 # Get my core id 89 # Get my core id
72 rdhwr v0, $0 90 rdhwr v0, $0
73 # Jump the master to kernel_entry 91 # Jump the master to kernel_entry
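
The new PRID tests in the assembly above decide whether the core-16057 workaround (setting the IPREF bit) applies. The same decision rendered in C, with masks and model values taken from the hunk (a sketch, not kernel code):

#include <stdint.h>
#include <stdio.h>

static int needs_core16057_war(uint32_t prid)
{
	if ((prid & 0xfff8) == 0x9000)	/* 63-P1 */
		return 1;
	if ((prid & 0xfff8) == 0x9008)	/* 63-P2 */
		return 1;
	if ((prid & 0xfff8) == 0x9100)	/* 68-P1 */
		return 1;
	if ((prid & 0xff00) == 0x9200)	/* 66-PX */
		return (prid & 0x00ff) < 2;	/* 66-P1.2+ is good */
	return 0;
}

int main(void)
{
	/* 63-P1 needs it, 66-P1.2 does not */
	printf("%d %d\n", needs_core16057_war(0x9001),
			  needs_core16057_war(0x9202));
	return 0;
}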
diff --git a/arch/mips/include/asm/mach-cavium-octeon/war.h b/arch/mips/include/asm/mach-cavium-octeon/war.h
index eb72b35cf04b..35c80be92207 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/war.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/war.h
@@ -22,4 +22,7 @@
22#define R10000_LLSC_WAR 0 22#define R10000_LLSC_WAR 0
23#define MIPS34K_MISSED_ITLB_WAR 0 23#define MIPS34K_MISSED_ITLB_WAR 0
24 24
25#define CAVIUM_OCTEON_DCACHE_PREFETCH_WAR \
26 OCTEON_IS_MODEL(OCTEON_CN6XXX)
27
25#endif /* __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H */ 28#endif /* __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H */
diff --git a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h b/arch/mips/include/asm/mach-jz4740/jz4740_nand.h
index 986982db7c38..79cff26d8b36 100644
--- a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h
+++ b/arch/mips/include/asm/mach-jz4740/jz4740_nand.h
@@ -27,8 +27,6 @@ struct jz_nand_platform_data {
27 27
28 struct nand_ecclayout *ecc_layout; 28 struct nand_ecclayout *ecc_layout;
29 29
30 unsigned int busy_gpio;
31
32 unsigned char banks[JZ_NAND_NUM_BANKS]; 30 unsigned char banks[JZ_NAND_NUM_BANKS];
33 31
34 void (*ident_callback)(struct platform_device *, struct nand_chip *, 32 void (*ident_callback)(struct platform_device *, struct nand_chip *,
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
index 2e54b4bff5cf..90dbe43c8d27 100644
--- a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
@@ -85,8 +85,8 @@ static inline void set_value_reg32(volatile u32 *const addr,
85 " "__beqz"%0, 1b \n" 85 " "__beqz"%0, 1b \n"
86 " nop \n" 86 " nop \n"
87 " .set pop \n" 87 " .set pop \n"
88 : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) 88 : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
89 : "ir" (~mask), "ir" (value), GCC_OFF12_ASM() (*addr)); 89 : "ir" (~mask), "ir" (value), GCC_OFF_SMALL_ASM() (*addr));
90} 90}
91 91
92/* 92/*
@@ -106,8 +106,8 @@ static inline void set_reg32(volatile u32 *const addr,
106 " "__beqz"%0, 1b \n" 106 " "__beqz"%0, 1b \n"
107 " nop \n" 107 " nop \n"
108 " .set pop \n" 108 " .set pop \n"
109 : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) 109 : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
110 : "ir" (mask), GCC_OFF12_ASM() (*addr)); 110 : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
111} 111}
112 112
113/* 113/*
@@ -127,8 +127,8 @@ static inline void clear_reg32(volatile u32 *const addr,
127 " "__beqz"%0, 1b \n" 127 " "__beqz"%0, 1b \n"
128 " nop \n" 128 " nop \n"
129 " .set pop \n" 129 " .set pop \n"
130 : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) 130 : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
131 : "ir" (~mask), GCC_OFF12_ASM() (*addr)); 131 : "ir" (~mask), GCC_OFF_SMALL_ASM() (*addr));
132} 132}
133 133
134/* 134/*
@@ -148,8 +148,8 @@ static inline void toggle_reg32(volatile u32 *const addr,
148 " "__beqz"%0, 1b \n" 148 " "__beqz"%0, 1b \n"
149 " nop \n" 149 " nop \n"
150 " .set pop \n" 150 " .set pop \n"
151 : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) 151 : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
152 : "ir" (mask), GCC_OFF12_ASM() (*addr)); 152 : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
153} 153}
154 154
155/* 155/*
@@ -220,8 +220,8 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
220 " .set arch=r4000 \n" \ 220 " .set arch=r4000 \n" \
221 "1: ll %0, %1 #custom_read_reg32 \n" \ 221 "1: ll %0, %1 #custom_read_reg32 \n" \
222 " .set pop \n" \ 222 " .set pop \n" \
223 : "=r" (tmp), "=" GCC_OFF12_ASM() (*address) \ 223 : "=r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address) \
224 : GCC_OFF12_ASM() (*address)) 224 : GCC_OFF_SMALL_ASM() (*address))
225 225
226#define custom_write_reg32(address, tmp) \ 226#define custom_write_reg32(address, tmp) \
227 __asm__ __volatile__( \ 227 __asm__ __volatile__( \
@@ -231,7 +231,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
231 " "__beqz"%0, 1b \n" \ 231 " "__beqz"%0, 1b \n" \
232 " nop \n" \ 232 " nop \n" \
233 " .set pop \n" \ 233 " .set pop \n" \
234 : "=&r" (tmp), "=" GCC_OFF12_ASM() (*address) \ 234 : "=&r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address) \
235 : "0" (tmp), GCC_OFF12_ASM() (*address)) 235 : "0" (tmp), GCC_OFF_SMALL_ASM() (*address))
236 236
237#endif /* __ASM_REGOPS_H__ */ 237#endif /* __ASM_REGOPS_H__ */
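
Each of these helpers is a read-modify-write of a device register made atomic by an ll/sc loop; only the constraint macro changes in this file. The non-atomic essence of set/clear/toggle, as a user-space sketch (helper names here are stand-ins, not the kernel ones):

#include <stdint.h>
#include <stdio.h>

/* What each ll/sc loop computes per iteration; the real versions
 * simply make these read-modify-writes atomic. */
static void set_bits(volatile uint32_t *addr, uint32_t mask)    { *addr |= mask; }
static void clear_bits(volatile uint32_t *addr, uint32_t mask)  { *addr &= ~mask; }
static void toggle_bits(volatile uint32_t *addr, uint32_t mask) { *addr ^= mask; }

int main(void)
{
	volatile uint32_t reg = 0x0f;

	set_bits(&reg, 0xf0);		/* 0xff */
	clear_bits(&reg, 0x03);		/* 0xfc */
	toggle_bits(&reg, 0xff);	/* 0x03 */
	printf("0x%02x\n", (unsigned)reg);
	return 0;
}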
diff --git a/arch/mips/include/asm/mips-r2-to-r6-emul.h b/arch/mips/include/asm/mips-r2-to-r6-emul.h
new file mode 100644
index 000000000000..60570f2c3ba2
--- /dev/null
+++ b/arch/mips/include/asm/mips-r2-to-r6-emul.h
@@ -0,0 +1,96 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2014 Imagination Technologies Ltd.
7 * Author: Markos Chandras <markos.chandras@imgtec.com>
8 */
9
10#ifndef __ASM_MIPS_R2_TO_R6_EMUL_H
11#define __ASM_MIPS_R2_TO_R6_EMUL_H
12
13struct mips_r2_emulator_stats {
14 u64 movs;
15 u64 hilo;
16 u64 muls;
17 u64 divs;
18 u64 dsps;
19 u64 bops;
20 u64 traps;
21 u64 fpus;
22 u64 loads;
23 u64 stores;
24 u64 llsc;
25 u64 dsemul;
26};
27
28struct mips_r2br_emulator_stats {
29 u64 jrs;
30 u64 bltzl;
31 u64 bgezl;
32 u64 bltzll;
33 u64 bgezll;
34 u64 bltzall;
35 u64 bgezall;
36 u64 bltzal;
37 u64 bgezal;
38 u64 beql;
39 u64 bnel;
40 u64 blezl;
41 u64 bgtzl;
42};
43
44#ifdef CONFIG_DEBUG_FS
45
46#define MIPS_R2_STATS(M) \
47do { \
48 u32 nir; \
49 int err; \
50 \
51 preempt_disable(); \
52 __this_cpu_inc(mipsr2emustats.M); \
53 err = __get_user(nir, (u32 __user *)regs->cp0_epc); \
54 if (!err) { \
55 if (nir == BREAK_MATH) \
56 __this_cpu_inc(mipsr2bdemustats.M); \
57 } \
58 preempt_enable(); \
59} while (0)
60
61#define MIPS_R2BR_STATS(M) \
62do { \
63 preempt_disable(); \
64 __this_cpu_inc(mipsr2bremustats.M); \
65 preempt_enable(); \
66} while (0)
67
68#else
69
70#define MIPS_R2_STATS(M) do { } while (0)
71#define MIPS_R2BR_STATS(M) do { } while (0)
72
73#endif /* CONFIG_DEBUG_FS */
74
75struct r2_decoder_table {
76 u32 mask;
77 u32 code;
78 int (*func)(struct pt_regs *regs, u32 inst);
79};
80
81
82extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
83 const char *str);
84
85#ifndef CONFIG_MIPSR2_TO_R6_EMULATOR
86static int mipsr2_emulation;
87static __maybe_unused int mipsr2_decoder(struct pt_regs *regs, u32 inst) { return 0; }
88#else
89/* MIPS R2 Emulator ON/OFF */
90extern int mipsr2_emulation;
91extern int mipsr2_decoder(struct pt_regs *regs, u32 inst);
92#endif /* CONFIG_MIPSR2_TO_R6_EMULATOR */
93
94#define NO_R6EMU (cpu_has_mips_r6 && !mipsr2_emulation)
95
96#endif /* __ASM_MIPS_R2_TO_R6_EMUL_H */
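
r2_decoder_table entries pair a mask/code match with an emulation callback; a decoder scans the table and dispatches on the first hit. A self-contained sketch with hypothetical entries (the kernel's real tables live in the R2-to-R6 emulator itself):

#include <stdint.h>
#include <stdio.h>

struct pt_regs;	/* opaque for this sketch */

struct r2_decoder_table {
	uint32_t mask;
	uint32_t code;
	int (*func)(struct pt_regs *regs, uint32_t inst);
};

static int emulate_nop(struct pt_regs *regs, uint32_t inst)
{
	(void)regs; (void)inst;
	return 0;
}

static const struct r2_decoder_table table[] = {
	{ 0xfc00003f, 0x00000000, emulate_nop },	/* hypothetical */
	{ 0, 0, NULL },					/* terminator */
};

static int decode(struct pt_regs *regs, uint32_t inst)
{
	const struct r2_decoder_table *p;

	for (p = table; p->func; p++)
		if ((inst & p->mask) == p->code)
			return p->func(regs, inst);
	return -1;	/* no emulation for this instruction */
}

int main(void)
{
	printf("%d\n", decode(NULL, 0x00000000));
	return 0;
}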
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 5b720d8c2745..fef004434096 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -653,6 +653,7 @@
653#define MIPS_CONF5_NF (_ULCAST_(1) << 0) 653#define MIPS_CONF5_NF (_ULCAST_(1) << 0)
654#define MIPS_CONF5_UFR (_ULCAST_(1) << 2) 654#define MIPS_CONF5_UFR (_ULCAST_(1) << 2)
655#define MIPS_CONF5_MRP (_ULCAST_(1) << 3) 655#define MIPS_CONF5_MRP (_ULCAST_(1) << 3)
656#define MIPS_CONF5_LLB (_ULCAST_(1) << 4)
656#define MIPS_CONF5_MVH (_ULCAST_(1) << 5) 657#define MIPS_CONF5_MVH (_ULCAST_(1) << 5)
657#define MIPS_CONF5_FRE (_ULCAST_(1) << 8) 658#define MIPS_CONF5_FRE (_ULCAST_(1) << 8)
658#define MIPS_CONF5_UFE (_ULCAST_(1) << 9) 659#define MIPS_CONF5_UFE (_ULCAST_(1) << 9)
@@ -1127,6 +1128,8 @@ do { \
1127#define write_c0_config6(val) __write_32bit_c0_register($16, 6, val) 1128#define write_c0_config6(val) __write_32bit_c0_register($16, 6, val)
1128#define write_c0_config7(val) __write_32bit_c0_register($16, 7, val) 1129#define write_c0_config7(val) __write_32bit_c0_register($16, 7, val)
1129 1130
1131#define read_c0_lladdr() __read_ulong_c0_register($17, 0)
1132#define write_c0_lladdr(val) __write_ulong_c0_register($17, 0, val)
1130#define read_c0_maar() __read_ulong_c0_register($17, 1) 1133#define read_c0_maar() __read_ulong_c0_register($17, 1)
1131#define write_c0_maar(val) __write_ulong_c0_register($17, 1, val) 1134#define write_c0_maar(val) __write_ulong_c0_register($17, 1, val)
1132#define read_c0_maari() __read_32bit_c0_register($17, 2) 1135#define read_c0_maari() __read_32bit_c0_register($17, 2)
@@ -1909,6 +1912,7 @@ __BUILD_SET_C0(config5)
1909__BUILD_SET_C0(intcontrol) 1912__BUILD_SET_C0(intcontrol)
1910__BUILD_SET_C0(intctl) 1913__BUILD_SET_C0(intctl)
1911__BUILD_SET_C0(srsmap) 1914__BUILD_SET_C0(srsmap)
1915__BUILD_SET_C0(pagegrain)
1912__BUILD_SET_C0(brcm_config_0) 1916__BUILD_SET_C0(brcm_config_0)
1913__BUILD_SET_C0(brcm_bus_pll) 1917__BUILD_SET_C0(brcm_bus_pll)
1914__BUILD_SET_C0(brcm_reset) 1918__BUILD_SET_C0(brcm_reset)
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
index c436138945a8..1afa1f986df8 100644
--- a/arch/mips/include/asm/mmu.h
+++ b/arch/mips/include/asm/mmu.h
@@ -1,9 +1,12 @@
1#ifndef __ASM_MMU_H 1#ifndef __ASM_MMU_H
2#define __ASM_MMU_H 2#define __ASM_MMU_H
3 3
4#include <linux/atomic.h>
5
4typedef struct { 6typedef struct {
5 unsigned long asid[NR_CPUS]; 7 unsigned long asid[NR_CPUS];
6 void *vdso; 8 void *vdso;
9 atomic_t fp_mode_switching;
7} mm_context_t; 10} mm_context_t;
8 11
9#endif /* __ASM_MMU_H */ 12#endif /* __ASM_MMU_H */
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 2f82568a3ee4..45914b59824c 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -25,7 +25,6 @@ do { \
25 if (cpu_has_htw) { \ 25 if (cpu_has_htw) { \
26 write_c0_pwbase(pgd); \ 26 write_c0_pwbase(pgd); \
27 back_to_back_c0_hazard(); \ 27 back_to_back_c0_hazard(); \
28 htw_reset(); \
29 } \ 28 } \
30} while (0) 29} while (0)
31 30
@@ -132,6 +131,8 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
132 for_each_possible_cpu(i) 131 for_each_possible_cpu(i)
133 cpu_context(i, mm) = 0; 132 cpu_context(i, mm) = 0;
134 133
134 atomic_set(&mm->context.fp_mode_switching, 0);
135
135 return 0; 136 return 0;
136} 137}
137 138
@@ -142,6 +143,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
142 unsigned long flags; 143 unsigned long flags;
143 local_irq_save(flags); 144 local_irq_save(flags);
144 145
146 htw_stop();
145 /* Check if our ASID is of an older version and thus invalid */ 147 /* Check if our ASID is of an older version and thus invalid */
146 if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) 148 if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
147 get_new_mmu_context(next, cpu); 149 get_new_mmu_context(next, cpu);
@@ -154,6 +156,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
154 */ 156 */
155 cpumask_clear_cpu(cpu, mm_cpumask(prev)); 157 cpumask_clear_cpu(cpu, mm_cpumask(prev));
156 cpumask_set_cpu(cpu, mm_cpumask(next)); 158 cpumask_set_cpu(cpu, mm_cpumask(next));
159 htw_start();
157 160
158 local_irq_restore(flags); 161 local_irq_restore(flags);
159} 162}
@@ -180,6 +183,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
180 183
181 local_irq_save(flags); 184 local_irq_save(flags);
182 185
186 htw_stop();
183 /* Unconditionally get a new ASID. */ 187 /* Unconditionally get a new ASID. */
184 get_new_mmu_context(next, cpu); 188 get_new_mmu_context(next, cpu);
185 189
@@ -189,6 +193,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
189 /* mark mmu ownership change */ 193 /* mark mmu ownership change */
190 cpumask_clear_cpu(cpu, mm_cpumask(prev)); 194 cpumask_clear_cpu(cpu, mm_cpumask(prev));
191 cpumask_set_cpu(cpu, mm_cpumask(next)); 195 cpumask_set_cpu(cpu, mm_cpumask(next));
196 htw_start();
192 197
193 local_irq_restore(flags); 198 local_irq_restore(flags);
194} 199}
@@ -203,6 +208,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
203 unsigned long flags; 208 unsigned long flags;
204 209
205 local_irq_save(flags); 210 local_irq_save(flags);
211 htw_stop();
206 212
207 if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { 213 if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
208 get_new_mmu_context(mm, cpu); 214 get_new_mmu_context(mm, cpu);
@@ -211,6 +217,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
211 /* will get a new context next time */ 217 /* will get a new context next time */
212 cpu_context(cpu, mm) = 0; 218 cpu_context(cpu, mm) = 0;
213 } 219 }
220 htw_start();
214 local_irq_restore(flags); 221 local_irq_restore(flags);
215} 222}
216 223
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 800fe578dc99..0aaf9a01ea50 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -88,10 +88,14 @@ search_module_dbetables(unsigned long addr)
88#define MODULE_PROC_FAMILY "MIPS32_R1 " 88#define MODULE_PROC_FAMILY "MIPS32_R1 "
89#elif defined CONFIG_CPU_MIPS32_R2 89#elif defined CONFIG_CPU_MIPS32_R2
90#define MODULE_PROC_FAMILY "MIPS32_R2 " 90#define MODULE_PROC_FAMILY "MIPS32_R2 "
91#elif defined CONFIG_CPU_MIPS32_R6
92#define MODULE_PROC_FAMILY "MIPS32_R6 "
91#elif defined CONFIG_CPU_MIPS64_R1 93#elif defined CONFIG_CPU_MIPS64_R1
92#define MODULE_PROC_FAMILY "MIPS64_R1 " 94#define MODULE_PROC_FAMILY "MIPS64_R1 "
93#elif defined CONFIG_CPU_MIPS64_R2 95#elif defined CONFIG_CPU_MIPS64_R2
94#define MODULE_PROC_FAMILY "MIPS64_R2 " 96#define MODULE_PROC_FAMILY "MIPS64_R2 "
97#elif defined CONFIG_CPU_MIPS64_R6
98#define MODULE_PROC_FAMILY "MIPS64_R6 "
95#elif defined CONFIG_CPU_R3000 99#elif defined CONFIG_CPU_R3000
96#define MODULE_PROC_FAMILY "R3000 " 100#define MODULE_PROC_FAMILY "R3000 "
97#elif defined CONFIG_CPU_TX39XX 101#elif defined CONFIG_CPU_TX39XX
diff --git a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
index 75739c83f07e..8d05d9069823 100644
--- a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
+++ b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
@@ -275,7 +275,7 @@ static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
275 " lbu %[ticket], %[now_serving]\n" 275 " lbu %[ticket], %[now_serving]\n"
276 "4:\n" 276 "4:\n"
277 ".set pop\n" : 277 ".set pop\n" :
278 [ticket_ptr] "=" GCC_OFF12_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]), 278 [ticket_ptr] "=" GCC_OFF_SMALL_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
279 [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp), 279 [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
280 [my_ticket] "=r"(my_ticket) 280 [my_ticket] "=r"(my_ticket)
281 ); 281 );
diff --git a/arch/mips/include/asm/octeon/cvmx-rst-defs.h b/arch/mips/include/asm/octeon/cvmx-rst-defs.h
new file mode 100644
index 000000000000..0c9c3e74d4ae
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-rst-defs.h
@@ -0,0 +1,306 @@
1/***********************license start***************
2 * Author: Cavium Inc.
3 *
4 * Contact: support@cavium.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2014 Cavium Inc.
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Inc. for more information
26 ***********************license end**************************************/
27
28#ifndef __CVMX_RST_DEFS_H__
29#define __CVMX_RST_DEFS_H__
30
31#define CVMX_RST_BOOT (CVMX_ADD_IO_SEG(0x0001180006001600ull))
32#define CVMX_RST_CFG (CVMX_ADD_IO_SEG(0x0001180006001610ull))
33#define CVMX_RST_CKILL (CVMX_ADD_IO_SEG(0x0001180006001638ull))
34#define CVMX_RST_CTLX(offset) (CVMX_ADD_IO_SEG(0x0001180006001640ull) + ((offset) & 3) * 8)
35#define CVMX_RST_DELAY (CVMX_ADD_IO_SEG(0x0001180006001608ull))
36#define CVMX_RST_ECO (CVMX_ADD_IO_SEG(0x00011800060017B8ull))
37#define CVMX_RST_INT (CVMX_ADD_IO_SEG(0x0001180006001628ull))
38#define CVMX_RST_OCX (CVMX_ADD_IO_SEG(0x0001180006001618ull))
39#define CVMX_RST_POWER_DBG (CVMX_ADD_IO_SEG(0x0001180006001708ull))
40#define CVMX_RST_PP_POWER (CVMX_ADD_IO_SEG(0x0001180006001700ull))
41#define CVMX_RST_SOFT_PRSTX(offset) (CVMX_ADD_IO_SEG(0x00011800060016C0ull) + ((offset) & 3) * 8)
42#define CVMX_RST_SOFT_RST (CVMX_ADD_IO_SEG(0x0001180006001680ull))
43
44union cvmx_rst_boot {
45 uint64_t u64;
46 struct cvmx_rst_boot_s {
47#ifdef __BIG_ENDIAN_BITFIELD
48 uint64_t chipkill:1;
49 uint64_t jtcsrdis:1;
50 uint64_t ejtagdis:1;
51 uint64_t romen:1;
52 uint64_t ckill_ppdis:1;
53 uint64_t jt_tstmode:1;
54 uint64_t vrm_err:1;
55 uint64_t reserved_37_56:20;
56 uint64_t c_mul:7;
57 uint64_t pnr_mul:6;
58 uint64_t reserved_21_23:3;
59 uint64_t lboot_oci:3;
60 uint64_t lboot_ext:6;
61 uint64_t lboot:10;
62 uint64_t rboot:1;
63 uint64_t rboot_pin:1;
64#else
65 uint64_t rboot_pin:1;
66 uint64_t rboot:1;
67 uint64_t lboot:10;
68 uint64_t lboot_ext:6;
69 uint64_t lboot_oci:3;
70 uint64_t reserved_21_23:3;
71 uint64_t pnr_mul:6;
72 uint64_t c_mul:7;
73 uint64_t reserved_37_56:20;
74 uint64_t vrm_err:1;
75 uint64_t jt_tstmode:1;
76 uint64_t ckill_ppdis:1;
77 uint64_t romen:1;
78 uint64_t ejtagdis:1;
79 uint64_t jtcsrdis:1;
80 uint64_t chipkill:1;
81#endif
82 } s;
83 struct cvmx_rst_boot_s cn70xx;
84 struct cvmx_rst_boot_s cn70xxp1;
85 struct cvmx_rst_boot_s cn78xx;
86};
87
88union cvmx_rst_cfg {
89 uint64_t u64;
90 struct cvmx_rst_cfg_s {
91#ifdef __BIG_ENDIAN_BITFIELD
92 uint64_t bist_delay:58;
93 uint64_t reserved_3_5:3;
94 uint64_t cntl_clr_bist:1;
95 uint64_t warm_clr_bist:1;
96 uint64_t soft_clr_bist:1;
97#else
98 uint64_t soft_clr_bist:1;
99 uint64_t warm_clr_bist:1;
100 uint64_t cntl_clr_bist:1;
101 uint64_t reserved_3_5:3;
102 uint64_t bist_delay:58;
103#endif
104 } s;
105 struct cvmx_rst_cfg_s cn70xx;
106 struct cvmx_rst_cfg_s cn70xxp1;
107 struct cvmx_rst_cfg_s cn78xx;
108};
109
110union cvmx_rst_ckill {
111 uint64_t u64;
112 struct cvmx_rst_ckill_s {
113#ifdef __BIG_ENDIAN_BITFIELD
114 uint64_t reserved_47_63:17;
115 uint64_t timer:47;
116#else
117 uint64_t timer:47;
118 uint64_t reserved_47_63:17;
119#endif
120 } s;
121 struct cvmx_rst_ckill_s cn70xx;
122 struct cvmx_rst_ckill_s cn70xxp1;
123 struct cvmx_rst_ckill_s cn78xx;
124};
125
126union cvmx_rst_ctlx {
127 uint64_t u64;
128 struct cvmx_rst_ctlx_s {
129#ifdef __BIG_ENDIAN_BITFIELD
130 uint64_t reserved_10_63:54;
131 uint64_t prst_link:1;
132 uint64_t rst_done:1;
133 uint64_t rst_link:1;
134 uint64_t host_mode:1;
135 uint64_t reserved_4_5:2;
136 uint64_t rst_drv:1;
137 uint64_t rst_rcv:1;
138 uint64_t rst_chip:1;
139 uint64_t rst_val:1;
140#else
141 uint64_t rst_val:1;
142 uint64_t rst_chip:1;
143 uint64_t rst_rcv:1;
144 uint64_t rst_drv:1;
145 uint64_t reserved_4_5:2;
146 uint64_t host_mode:1;
147 uint64_t rst_link:1;
148 uint64_t rst_done:1;
149 uint64_t prst_link:1;
150 uint64_t reserved_10_63:54;
151#endif
152 } s;
153 struct cvmx_rst_ctlx_s cn70xx;
154 struct cvmx_rst_ctlx_s cn70xxp1;
155 struct cvmx_rst_ctlx_s cn78xx;
156};
157
158union cvmx_rst_delay {
159 uint64_t u64;
160 struct cvmx_rst_delay_s {
161#ifdef __BIG_ENDIAN_BITFIELD
162 uint64_t reserved_32_63:32;
163 uint64_t warm_rst_dly:16;
164 uint64_t soft_rst_dly:16;
165#else
166 uint64_t soft_rst_dly:16;
167 uint64_t warm_rst_dly:16;
168 uint64_t reserved_32_63:32;
169#endif
170 } s;
171 struct cvmx_rst_delay_s cn70xx;
172 struct cvmx_rst_delay_s cn70xxp1;
173 struct cvmx_rst_delay_s cn78xx;
174};
175
176union cvmx_rst_eco {
177 uint64_t u64;
178 struct cvmx_rst_eco_s {
179#ifdef __BIG_ENDIAN_BITFIELD
180 uint64_t reserved_32_63:32;
181 uint64_t eco_rw:32;
182#else
183 uint64_t eco_rw:32;
184 uint64_t reserved_32_63:32;
185#endif
186 } s;
187 struct cvmx_rst_eco_s cn78xx;
188};
189
190union cvmx_rst_int {
191 uint64_t u64;
192 struct cvmx_rst_int_s {
193#ifdef __BIG_ENDIAN_BITFIELD
194 uint64_t reserved_12_63:52;
195 uint64_t perst:4;
196 uint64_t reserved_4_7:4;
197 uint64_t rst_link:4;
198#else
199 uint64_t rst_link:4;
200 uint64_t reserved_4_7:4;
201 uint64_t perst:4;
202 uint64_t reserved_12_63:52;
203#endif
204 } s;
205 struct cvmx_rst_int_cn70xx {
206#ifdef __BIG_ENDIAN_BITFIELD
207 uint64_t reserved_11_63:53;
208 uint64_t perst:3;
209 uint64_t reserved_3_7:5;
210 uint64_t rst_link:3;
211#else
212 uint64_t rst_link:3;
213 uint64_t reserved_3_7:5;
214 uint64_t perst:3;
215 uint64_t reserved_11_63:53;
216#endif
217 } cn70xx;
218 struct cvmx_rst_int_cn70xx cn70xxp1;
219 struct cvmx_rst_int_s cn78xx;
220};
221
222union cvmx_rst_ocx {
223 uint64_t u64;
224 struct cvmx_rst_ocx_s {
225#ifdef __BIG_ENDIAN_BITFIELD
226 uint64_t reserved_3_63:61;
227 uint64_t rst_link:3;
228#else
229 uint64_t rst_link:3;
230 uint64_t reserved_3_63:61;
231#endif
232 } s;
233 struct cvmx_rst_ocx_s cn78xx;
234};
235
236union cvmx_rst_power_dbg {
237 uint64_t u64;
238 struct cvmx_rst_power_dbg_s {
239#ifdef __BIG_ENDIAN_BITFIELD
240 uint64_t reserved_3_63:61;
241 uint64_t str:3;
242#else
243 uint64_t str:3;
244 uint64_t reserved_3_63:61;
245#endif
246 } s;
247 struct cvmx_rst_power_dbg_s cn78xx;
248};
249
250union cvmx_rst_pp_power {
251 uint64_t u64;
252 struct cvmx_rst_pp_power_s {
253#ifdef __BIG_ENDIAN_BITFIELD
254 uint64_t reserved_48_63:16;
255 uint64_t gate:48;
256#else
257 uint64_t gate:48;
258 uint64_t reserved_48_63:16;
259#endif
260 } s;
261 struct cvmx_rst_pp_power_cn70xx {
262#ifdef __BIG_ENDIAN_BITFIELD
263 uint64_t reserved_4_63:60;
264 uint64_t gate:4;
265#else
266 uint64_t gate:4;
267 uint64_t reserved_4_63:60;
268#endif
269 } cn70xx;
270 struct cvmx_rst_pp_power_cn70xx cn70xxp1;
271 struct cvmx_rst_pp_power_s cn78xx;
272};
273
274union cvmx_rst_soft_prstx {
275 uint64_t u64;
276 struct cvmx_rst_soft_prstx_s {
277#ifdef __BIG_ENDIAN_BITFIELD
278 uint64_t reserved_1_63:63;
279 uint64_t soft_prst:1;
280#else
281 uint64_t soft_prst:1;
282 uint64_t reserved_1_63:63;
283#endif
284 } s;
285 struct cvmx_rst_soft_prstx_s cn70xx;
286 struct cvmx_rst_soft_prstx_s cn70xxp1;
287 struct cvmx_rst_soft_prstx_s cn78xx;
288};
289
290union cvmx_rst_soft_rst {
291 uint64_t u64;
292 struct cvmx_rst_soft_rst_s {
293#ifdef __BIG_ENDIAN_BITFIELD
294 uint64_t reserved_1_63:63;
295 uint64_t soft_rst:1;
296#else
297 uint64_t soft_rst:1;
298 uint64_t reserved_1_63:63;
299#endif
300 } s;
301 struct cvmx_rst_soft_rst_s cn70xx;
302 struct cvmx_rst_soft_rst_s cn70xxp1;
303 struct cvmx_rst_soft_rst_s cn78xx;
304};
305
306#endif
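
These unions follow the usual cvmx pattern: the raw 64-bit CSR value and named bitfields overlay the same storage, with the field order flipped under __BIG_ENDIAN_BITFIELD. A little-endian host sketch using the cvmx_rst_delay layout above:

#include <stdint.h>
#include <stdio.h>

union rst_delay {
	uint64_t u64;
	struct {	/* little-endian field order assumed here */
		uint64_t soft_rst_dly:16;
		uint64_t warm_rst_dly:16;
		uint64_t reserved_32_63:32;
	} s;
};

int main(void)
{
	union rst_delay d = { .u64 = 0x00000000abcd1234ull };

	/* soft=0x1234 warm=0xabcd */
	printf("soft=0x%llx warm=0x%llx\n",
	       (unsigned long long)d.s.soft_rst_dly,
	       (unsigned long long)d.s.warm_rst_dly);
	return 0;
}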
diff --git a/arch/mips/include/asm/octeon/octeon-model.h b/arch/mips/include/asm/octeon/octeon-model.h
index e8a1c2fd52cd..92b377e36dac 100644
--- a/arch/mips/include/asm/octeon/octeon-model.h
+++ b/arch/mips/include/asm/octeon/octeon-model.h
@@ -45,6 +45,7 @@
45 */ 45 */
46 46
47#define OCTEON_FAMILY_MASK 0x00ffff00 47#define OCTEON_FAMILY_MASK 0x00ffff00
48#define OCTEON_PRID_MASK 0x00ffffff
48 49
49/* Flag bits in top byte */ 50/* Flag bits in top byte */
50/* Ignores revision in model checks */ 51/* Ignores revision in model checks */
@@ -63,11 +64,52 @@
63#define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000 64#define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000
64/* Match all cnf7XXX Octeon models. */ 65/* Match all cnf7XXX Octeon models. */
65#define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000 66#define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000
67/* Match all cn7XXX Octeon models. */
68#define OM_MATCH_7XXX_FAMILY_MODELS 0x10000000
69#define OM_MATCH_FAMILY_MODELS (OM_MATCH_5XXX_FAMILY_MODELS | \
70 OM_MATCH_6XXX_FAMILY_MODELS | \
71 OM_MATCH_F7XXX_FAMILY_MODELS | \
72 OM_MATCH_7XXX_FAMILY_MODELS)
73/*
74 * CN7XXX models with new revision encoding
75 */
76
77#define OCTEON_CN73XX_PASS1_0 0x000d9700
78#define OCTEON_CN73XX (OCTEON_CN73XX_PASS1_0 | OM_IGNORE_REVISION)
79#define OCTEON_CN73XX_PASS1_X (OCTEON_CN73XX_PASS1_0 | \
80 OM_IGNORE_MINOR_REVISION)
81
82#define OCTEON_CN70XX_PASS1_0 0x000d9600
83#define OCTEON_CN70XX_PASS1_1 0x000d9601
84#define OCTEON_CN70XX_PASS1_2 0x000d9602
85
86#define OCTEON_CN70XX_PASS2_0 0x000d9608
87
88#define OCTEON_CN70XX (OCTEON_CN70XX_PASS1_0 | OM_IGNORE_REVISION)
89#define OCTEON_CN70XX_PASS1_X (OCTEON_CN70XX_PASS1_0 | \
90 OM_IGNORE_MINOR_REVISION)
91#define OCTEON_CN70XX_PASS2_X (OCTEON_CN70XX_PASS2_0 | \
92 OM_IGNORE_MINOR_REVISION)
93
94#define OCTEON_CN71XX OCTEON_CN70XX
95
96#define OCTEON_CN78XX_PASS1_0 0x000d9500
97#define OCTEON_CN78XX_PASS1_1 0x000d9501
98#define OCTEON_CN78XX_PASS2_0 0x000d9508
99
100#define OCTEON_CN78XX (OCTEON_CN78XX_PASS1_0 | OM_IGNORE_REVISION)
101#define OCTEON_CN78XX_PASS1_X (OCTEON_CN78XX_PASS1_0 | \
102 OM_IGNORE_MINOR_REVISION)
103#define OCTEON_CN78XX_PASS2_X (OCTEON_CN78XX_PASS2_0 | \
104 OM_IGNORE_MINOR_REVISION)
105
106#define OCTEON_CN76XX (0x000d9540 | OM_CHECK_SUBMODEL)
66 107
67/* 108/*
68 * CNF7XXX models with new revision encoding 109 * CNF7XXX models with new revision encoding
69 */ 110 */
70#define OCTEON_CNF71XX_PASS1_0 0x000d9400 111#define OCTEON_CNF71XX_PASS1_0 0x000d9400
112#define OCTEON_CNF71XX_PASS1_1 0x000d9401
71 113
72#define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION) 114#define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION)
73#define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) 115#define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
@@ -79,6 +121,8 @@
79#define OCTEON_CN68XX_PASS1_1 0x000d9101 121#define OCTEON_CN68XX_PASS1_1 0x000d9101
80#define OCTEON_CN68XX_PASS1_2 0x000d9102 122#define OCTEON_CN68XX_PASS1_2 0x000d9102
81#define OCTEON_CN68XX_PASS2_0 0x000d9108 123#define OCTEON_CN68XX_PASS2_0 0x000d9108
124#define OCTEON_CN68XX_PASS2_1 0x000d9109
125#define OCTEON_CN68XX_PASS2_2 0x000d910a
82 126
83#define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION) 127#define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION)
84#define OCTEON_CN68XX_PASS1_X (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) 128#define OCTEON_CN68XX_PASS1_X (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
@@ -104,11 +148,18 @@
104#define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) 148#define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
105#define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION) 149#define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
106 150
151/* CN62XX is the same as CN63XX with 1 MB cache */
152#define OCTEON_CN62XX OCTEON_CN63XX
153
107#define OCTEON_CN61XX_PASS1_0 0x000d9300 154#define OCTEON_CN61XX_PASS1_0 0x000d9300
155#define OCTEON_CN61XX_PASS1_1 0x000d9301
108 156
109#define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION) 157#define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION)
110#define OCTEON_CN61XX_PASS1_X (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) 158#define OCTEON_CN61XX_PASS1_X (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
111 159
160/* CN60XX is the same as CN61XX with 512 KB cache */
161#define OCTEON_CN60XX OCTEON_CN61XX
162
112/* 163/*
113 * CN5XXX models with new revision encoding 164 * CN5XXX models with new revision encoding
114 */ 165 */
@@ -120,7 +171,7 @@
120#define OCTEON_CN58XX_PASS2_2 0x000d030a 171#define OCTEON_CN58XX_PASS2_2 0x000d030a
121#define OCTEON_CN58XX_PASS2_3 0x000d030b 172#define OCTEON_CN58XX_PASS2_3 0x000d030b
122 173
123#define OCTEON_CN58XX (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION) 174#define OCTEON_CN58XX (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_REVISION)
124#define OCTEON_CN58XX_PASS1_X (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) 175#define OCTEON_CN58XX_PASS1_X (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
125#define OCTEON_CN58XX_PASS2_X (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION) 176#define OCTEON_CN58XX_PASS2_X (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
126#define OCTEON_CN58XX_PASS1 OCTEON_CN58XX_PASS1_X 177#define OCTEON_CN58XX_PASS1 OCTEON_CN58XX_PASS1_X
@@ -217,12 +268,10 @@
217#define OCTEON_CN3XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION) 268#define OCTEON_CN3XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION)
218#define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS) 269#define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
219#define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS) 270#define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
220 271#define OCTEON_CNF7XXX (OCTEON_CNF71XX_PASS1_0 | \
221/* These are used to cover entire families of OCTEON processors */ 272 OM_MATCH_F7XXX_FAMILY_MODELS)
222#define OCTEON_FAM_1 (OCTEON_CN3XXX) 273#define OCTEON_CN7XXX (OCTEON_CN78XX_PASS1_0 | \
223#define OCTEON_FAM_PLUS (OCTEON_CN5XXX) 274 OM_MATCH_7XXX_FAMILY_MODELS)
224#define OCTEON_FAM_1_PLUS (OCTEON_FAM_PLUS | OM_MATCH_PREVIOUS_MODELS)
225#define OCTEON_FAM_2 (OCTEON_CN6XXX)
226 275
227/* The revision byte (low byte) has two different encodings. 276/* The revision byte (low byte) has two different encodings.
228 * CN3XXX: 277 * CN3XXX:
@@ -232,7 +281,7 @@
232 * <4>: alternate package 281 * <4>: alternate package
233 * <3:0>: revision 282 * <3:0>: revision
234 * 283 *
235 * CN5XXX: 284 * CN5XXX and later models:
236 * 285 *
237 * bits 286 * bits
238 * <7>: reserved (0) 287 * <7>: reserved (0)
@@ -251,17 +300,21 @@
251/* CN5XXX and later use a different layout of bits in the revision ID field */ 300/* CN5XXX and later use a different layout of bits in the revision ID field */
252#define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK 301#define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK
253#define OCTEON_58XX_FAMILY_REV_MASK 0x00ffff3f 302#define OCTEON_58XX_FAMILY_REV_MASK 0x00ffff3f
254#define OCTEON_58XX_MODEL_MASK 0x00ffffc0 303#define OCTEON_58XX_MODEL_MASK 0x00ffff40
255#define OCTEON_58XX_MODEL_REV_MASK (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK) 304#define OCTEON_58XX_MODEL_REV_MASK (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK)
256#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00fffff8) 305#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00ffff38)
257#define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0 306#define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0
258 307
259/* forward declarations */
260static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure)); 308static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
261static inline uint64_t cvmx_read_csr(uint64_t csr_addr); 309static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
262 310
263#define __OCTEON_MATCH_MASK__(x, y, z) (((x) & (z)) == ((y) & (z))) 311#define __OCTEON_MATCH_MASK__(x, y, z) (((x) & (z)) == ((y) & (z)))
264 312
313/*
314 * __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model)
315 * returns true if chip_model is identical or belong to the OCTEON
316 * model group specified in arg_model.
317 */
265/* NOTE: This is for internal use only! */ 318/* NOTE: This is for internal use only! */
266#define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model) \ 319#define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model) \
267((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) && ( \ 320((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) && ( \
@@ -286,11 +339,18 @@ static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
286 ((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_REVISION) \ 339 ((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_REVISION) \
287 && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_FAMILY_MASK)) || \ 340 && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_FAMILY_MASK)) || \
288 ((((arg_model) & (OM_FLAG_MASK)) == OM_CHECK_SUBMODEL) \ 341 ((((arg_model) & (OM_FLAG_MASK)) == OM_CHECK_SUBMODEL) \
289 && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_REV_MASK)) || \ 342 && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MASK)) || \
290 ((((arg_model) & (OM_MATCH_5XXX_FAMILY_MODELS)) == OM_MATCH_5XXX_FAMILY_MODELS) \ 343 ((((arg_model) & (OM_MATCH_5XXX_FAMILY_MODELS)) == OM_MATCH_5XXX_FAMILY_MODELS) \
291 && ((chip_model) >= OCTEON_CN58XX_PASS1_0) && ((chip_model) < OCTEON_CN63XX_PASS1_0)) || \ 344 && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN58XX_PASS1_0) \
345 && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN63XX_PASS1_0)) || \
292 ((((arg_model) & (OM_MATCH_6XXX_FAMILY_MODELS)) == OM_MATCH_6XXX_FAMILY_MODELS) \ 346 ((((arg_model) & (OM_MATCH_6XXX_FAMILY_MODELS)) == OM_MATCH_6XXX_FAMILY_MODELS) \
293 && ((chip_model) >= OCTEON_CN63XX_PASS1_0)) || \ 347 && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN63XX_PASS1_0) \
348 && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CNF71XX_PASS1_0)) || \
349 ((((arg_model) & (OM_MATCH_F7XXX_FAMILY_MODELS)) == OM_MATCH_F7XXX_FAMILY_MODELS) \
350 && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CNF71XX_PASS1_0) \
351 && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN78XX_PASS1_0)) || \
352 ((((arg_model) & (OM_MATCH_7XXX_FAMILY_MODELS)) == OM_MATCH_7XXX_FAMILY_MODELS) \
353 && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN78XX_PASS1_0)) || \
294 ((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \ 354 ((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \
295 && (((chip_model) & OCTEON_58XX_MODEL_MASK) < ((arg_model) & OCTEON_58XX_MODEL_MASK))) \ 355 && (((chip_model) & OCTEON_58XX_MODEL_MASK) < ((arg_model) & OCTEON_58XX_MODEL_MASK))) \
296 ))) 356 )))
@@ -300,14 +360,6 @@ static inline int __octeon_is_model_runtime__(uint32_t model)
300{ 360{
301 uint32_t cpuid = cvmx_get_proc_id(); 361 uint32_t cpuid = cvmx_get_proc_id();
302 362
303 /*
304 * Check for special case of mismarked 3005 samples. We only
305 * need to check if the sub model isn't being ignored
306 */
307 if ((model & OM_CHECK_SUBMODEL) == OM_CHECK_SUBMODEL) {
308 if (cpuid == OCTEON_CN3010_PASS1 && (cvmx_read_csr(0x80011800800007B8ull) & (1ull << 34)))
309 cpuid |= 0x10;
310 }
311 return __OCTEON_IS_MODEL_COMPILE__(model, cpuid); 363 return __OCTEON_IS_MODEL_COMPILE__(model, cpuid);
312} 364}
313 365
@@ -326,10 +378,21 @@ static inline int __octeon_is_model_runtime__(uint32_t model)
326#define OCTEON_IS_COMMON_BINARY() 1 378#define OCTEON_IS_COMMON_BINARY() 1
327#undef OCTEON_MODEL 379#undef OCTEON_MODEL
328 380
381#define OCTEON_IS_OCTEON1() OCTEON_IS_MODEL(OCTEON_CN3XXX)
382#define OCTEON_IS_OCTEONPLUS() OCTEON_IS_MODEL(OCTEON_CN5XXX)
383#define OCTEON_IS_OCTEON2() \
384 (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
385
386#define OCTEON_IS_OCTEON3() OCTEON_IS_MODEL(OCTEON_CN7XXX)
387
388#define OCTEON_IS_OCTEON1PLUS() (OCTEON_IS_OCTEON1() || OCTEON_IS_OCTEONPLUS())
389
329const char *__init octeon_model_get_string(uint32_t chip_id); 390const char *__init octeon_model_get_string(uint32_t chip_id);
330 391
331/* 392/*
332 * Return the octeon family, i.e., ProcessorID of the PrID register. 393 * Return the octeon family, i.e., ProcessorID of the PrID register.
394 *
395 * @return the octeon family on success, ((uint32_t)-1) on error.
333 */ 396 */
334static inline uint32_t cvmx_get_octeon_family(void) 397static inline uint32_t cvmx_get_octeon_family(void)
335{ 398{
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index 6dfefd2d5cdf..041596570856 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -9,6 +9,7 @@
9#define __ASM_OCTEON_OCTEON_H 9#define __ASM_OCTEON_OCTEON_H
10 10
11#include <asm/octeon/cvmx.h> 11#include <asm/octeon/cvmx.h>
12#include <asm/bitfield.h>
12 13
13extern uint64_t octeon_bootmem_alloc_range_phys(uint64_t size, 14extern uint64_t octeon_bootmem_alloc_range_phys(uint64_t size,
14 uint64_t alignment, 15 uint64_t alignment,
@@ -53,6 +54,7 @@ extern void octeon_io_clk_delay(unsigned long);
53#define OCTOEN_SERIAL_LEN 20 54#define OCTOEN_SERIAL_LEN 20
54 55
55struct octeon_boot_descriptor { 56struct octeon_boot_descriptor {
57#ifdef __BIG_ENDIAN_BITFIELD
56 /* Start of block referenced by assembly code - do not change! */ 58 /* Start of block referenced by assembly code - do not change! */
57 uint32_t desc_version; 59 uint32_t desc_version;
58 uint32_t desc_size; 60 uint32_t desc_size;
@@ -104,77 +106,149 @@ struct octeon_boot_descriptor {
104 uint8_t mac_addr_base[6]; 106 uint8_t mac_addr_base[6];
105 uint8_t mac_addr_count; 107 uint8_t mac_addr_count;
106 uint64_t cvmx_desc_vaddr; 108 uint64_t cvmx_desc_vaddr;
109#else
110 uint32_t desc_size;
111 uint32_t desc_version;
112 uint64_t stack_top;
113 uint64_t heap_base;
114 uint64_t heap_end;
115 /* Only used by bootloader */
116 uint64_t entry_point;
117 uint64_t desc_vaddr;
118 /* End of this block referenced by assembly code - do not change! */
119 uint32_t stack_size;
120 uint32_t exception_base_addr;
121 uint32_t argc;
122 uint32_t heap_size;
123 /*
124 * Argv for the application.
125 * Warning: low bit is scrambled in little-endian.
126 */
127 uint32_t argv[OCTEON_ARGV_MAX_ARGS];
128
129#define BOOT_FLAG_INIT_CORE (1 << 0)
130#define OCTEON_BL_FLAG_DEBUG (1 << 1)
131#define OCTEON_BL_FLAG_NO_MAGIC (1 << 2)
132 /* If set, use uart1 for console */
133#define OCTEON_BL_FLAG_CONSOLE_UART1 (1 << 3)
134 /* If set, use PCI console */
135#define OCTEON_BL_FLAG_CONSOLE_PCI (1 << 4)
136 /* Call exit on break on serial port */
137#define OCTEON_BL_FLAG_BREAK (1 << 5)
138
139 uint32_t core_mask;
140 uint32_t flags;
141 /* physical address of free memory descriptor block. */
142 uint32_t phy_mem_desc_addr;
143 /* DRAM size in megabytes. */
144 uint32_t dram_size;
145 /* CPU clock speed, in hz. */
146 uint32_t eclock_hz;
147 /* used to pass flags from app to debugger. */
148 uint32_t debugger_flags_base_addr;
149 /* SPI4 clock in hz. */
150 uint32_t spi_clock_hz;
151 /* DRAM clock speed, in hz. */
152 uint32_t dclock_hz;
153 uint8_t chip_rev_minor;
154 uint8_t chip_rev_major;
155 uint16_t chip_type;
156 uint8_t board_rev_minor;
157 uint8_t board_rev_major;
158 uint16_t board_type;
159
160 uint64_t unused1[4]; /* Not even filled in by bootloader. */
161
162 uint64_t cvmx_desc_vaddr;
163#endif
107}; 164};
108 165
109union octeon_cvmemctl { 166union octeon_cvmemctl {
110 uint64_t u64; 167 uint64_t u64;
111 struct { 168 struct {
112 /* RO 1 = BIST fail, 0 = BIST pass */ 169 /* RO 1 = BIST fail, 0 = BIST pass */
113 uint64_t tlbbist:1; 170 __BITFIELD_FIELD(uint64_t tlbbist:1,
114 /* RO 1 = BIST fail, 0 = BIST pass */ 171 /* RO 1 = BIST fail, 0 = BIST pass */
115 uint64_t l1cbist:1; 172 __BITFIELD_FIELD(uint64_t l1cbist:1,
116 /* RO 1 = BIST fail, 0 = BIST pass */ 173 /* RO 1 = BIST fail, 0 = BIST pass */
117 uint64_t l1dbist:1; 174 __BITFIELD_FIELD(uint64_t l1dbist:1,
118 /* RO 1 = BIST fail, 0 = BIST pass */ 175 /* RO 1 = BIST fail, 0 = BIST pass */
119 uint64_t dcmbist:1; 176 __BITFIELD_FIELD(uint64_t dcmbist:1,
120 /* RO 1 = BIST fail, 0 = BIST pass */ 177 /* RO 1 = BIST fail, 0 = BIST pass */
121 uint64_t ptgbist:1; 178 __BITFIELD_FIELD(uint64_t ptgbist:1,
122 /* RO 1 = BIST fail, 0 = BIST pass */ 179 /* RO 1 = BIST fail, 0 = BIST pass */
123 uint64_t wbfbist:1; 180 __BITFIELD_FIELD(uint64_t wbfbist:1,
124 /* Reserved */ 181 /* Reserved */
125 uint64_t reserved:22; 182 __BITFIELD_FIELD(uint64_t reserved:17,
183 /* OCTEON II - TLB replacement policy: 0 = bitmask LRU; 1 = NLU.
184 * This field selects between the TLB replacement policies:
185 * bitmask LRU or NLU. Bitmask LRU maintains a mask of
186 * recently used TLB entries and avoids them as new entries
187 * are allocated. NLU simply guarantees that the next
188 * allocation is not the last used TLB entry. */
189 __BITFIELD_FIELD(uint64_t tlbnlu:1,
190 /* OCTEON II - Selects the bit in the counter used for
191 * releasing a PAUSE. This counter trips every 2^(8+PAUSETIME)
192 * cycles. If not already released, the cnMIPS II core will
193 * always release a given PAUSE instruction within
194 * 2^(8+PAUSETIME) cycles. If the counter trip happens to line up,
195 * the cnMIPS II core may release the PAUSE instantly. */
196 __BITFIELD_FIELD(uint64_t pausetime:3,
197 /* OCTEON II - This field is an extension of
198 * CvmMemCtl[DIDTTO] */
199 __BITFIELD_FIELD(uint64_t didtto2:1,
126 /* R/W If set, marked write-buffer entries time out 200 /* R/W If set, marked write-buffer entries time out
127 * the same as other entries; if clear, marked 201 * the same as other entries; if clear, marked
128 * write-buffer entries use the maximum timeout. */ 202 * write-buffer entries use the maximum timeout. */
129 uint64_t dismarkwblongto:1; 203 __BITFIELD_FIELD(uint64_t dismarkwblongto:1,
130 /* R/W If set, a merged store does not clear the 204 /* R/W If set, a merged store does not clear the
131 * write-buffer entry timeout state. */ 205 * write-buffer entry timeout state. */
132 uint64_t dismrgclrwbto:1; 206 __BITFIELD_FIELD(uint64_t dismrgclrwbto:1,
133 /* R/W Two bits that are the MSBs of the resultant 207 /* R/W Two bits that are the MSBs of the resultant
134 * CVMSEG LM word location for an IOBDMA. The other 8 208 * CVMSEG LM word location for an IOBDMA. The other 8
135 * bits come from the SCRADDR field of the IOBDMA. */ 209 * bits come from the SCRADDR field of the IOBDMA. */
136 uint64_t iobdmascrmsb:2; 210 __BITFIELD_FIELD(uint64_t iobdmascrmsb:2,
137 /* R/W If set, SYNCWS and SYNCS only order marked 211 /* R/W If set, SYNCWS and SYNCS only order marked
138 * stores; if clear, SYNCWS and SYNCS only order 212 * stores; if clear, SYNCWS and SYNCS only order
139 * unmarked stores. SYNCWSMARKED has no effect when 213 * unmarked stores. SYNCWSMARKED has no effect when
140 * DISSYNCWS is set. */ 214 * DISSYNCWS is set. */
141 uint64_t syncwsmarked:1; 215 __BITFIELD_FIELD(uint64_t syncwsmarked:1,
142 /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as 216 /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as
143 * SYNC. */ 217 * SYNC. */
144 uint64_t dissyncws:1; 218 __BITFIELD_FIELD(uint64_t dissyncws:1,
145 /* R/W If set, no stall happens on write buffer 219 /* R/W If set, no stall happens on write buffer
146 * full. */ 220 * full. */
147 uint64_t diswbfst:1; 221 __BITFIELD_FIELD(uint64_t diswbfst:1,
148 /* R/W If set (and SX set), supervisor-level 222 /* R/W If set (and SX set), supervisor-level
149 * loads/stores can use XKPHYS addresses with 223 * loads/stores can use XKPHYS addresses with
150 * VA<48>==0 */ 224 * VA<48>==0 */
151 uint64_t xkmemenas:1; 225 __BITFIELD_FIELD(uint64_t xkmemenas:1,
152 /* R/W If set (and UX set), user-level loads/stores 226 /* R/W If set (and UX set), user-level loads/stores
153 * can use XKPHYS addresses with VA<48>==0 */ 227 * can use XKPHYS addresses with VA<48>==0 */
154 uint64_t xkmemenau:1; 228 __BITFIELD_FIELD(uint64_t xkmemenau:1,
155 /* R/W If set (and SX set), supervisor-level 229 /* R/W If set (and SX set), supervisor-level
156 * loads/stores can use XKPHYS addresses with 230 * loads/stores can use XKPHYS addresses with
157 * VA<48>==1 */ 231 * VA<48>==1 */
158 uint64_t xkioenas:1; 232 __BITFIELD_FIELD(uint64_t xkioenas:1,
159 /* R/W If set (and UX set), user-level loads/stores 233 /* R/W If set (and UX set), user-level loads/stores
160 * can use XKPHYS addresses with VA<48>==1 */ 234 * can use XKPHYS addresses with VA<48>==1 */
161 uint64_t xkioenau:1; 235 __BITFIELD_FIELD(uint64_t xkioenau:1,
162 /* R/W If set, all stores act as SYNCW (NOMERGE must 236 /* R/W If set, all stores act as SYNCW (NOMERGE must
163 * be set when this is set) RW, reset to 0. */ 237 * be set when this is set) RW, reset to 0. */
164 uint64_t allsyncw:1; 238 __BITFIELD_FIELD(uint64_t allsyncw:1,
165 /* R/W If set, no stores merge, and all stores reach 239 /* R/W If set, no stores merge, and all stores reach
166 * the coherent bus in order. */ 240 * the coherent bus in order. */
167 uint64_t nomerge:1; 241 __BITFIELD_FIELD(uint64_t nomerge:1,
168 /* R/W Selects the bit in the counter used for DID 242 /* R/W Selects the bit in the counter used for DID
169 * time-outs 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = 243 * time-outs 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 =
170 * 2^14. Actual time-out is between 1x and 2x this 244 * 2^14. Actual time-out is between 1x and 2x this
171 * interval. For example, with DIDTTO=3, expiration 245 * interval. For example, with DIDTTO=3, expiration
172 * interval is between 16K and 32K. */ 246 * interval is between 16K and 32K. */
173 uint64_t didtto:2; 247 __BITFIELD_FIELD(uint64_t didtto:2,
174 /* R/W If set, the (mem) CSR clock never turns off. */ 248 /* R/W If set, the (mem) CSR clock never turns off. */
175 uint64_t csrckalwys:1; 249 __BITFIELD_FIELD(uint64_t csrckalwys:1,
176 /* R/W If set, mclk never turns off. */ 250 /* R/W If set, mclk never turns off. */
177 uint64_t mclkalwys:1; 251 __BITFIELD_FIELD(uint64_t mclkalwys:1,
178 /* R/W Selects the bit in the counter used for write 252 /* R/W Selects the bit in the counter used for write
179 * buffer flush time-outs. (WBFLT+11) is the bit 253 * buffer flush time-outs. (WBFLT+11) is the bit
180 * position in an internal counter used to determine 254 * position in an internal counter used to determine
@@ -182,25 +256,26 @@ union octeon_cvmemctl {
182 * 2x this interval. For example, with WBFLT = 0, a 256 * 2x this interval. For example, with WBFLT = 0, a
183 * write buffer expires between 2K and 4K cycles after 257 * write buffer expires between 2K and 4K cycles after
184 * the write buffer entry is allocated. */ 258 * the write buffer entry is allocated. */
185 uint64_t wbfltime:3; 259 __BITFIELD_FIELD(uint64_t wbfltime:3,
186 /* R/W If set, do not put Istream in the L2 cache. */ 260 /* R/W If set, do not put Istream in the L2 cache. */
187 uint64_t istrnol2:1; 261 __BITFIELD_FIELD(uint64_t istrnol2:1,
188 /* R/W The write buffer threshold. */ 262 /* R/W The write buffer threshold. */
189 uint64_t wbthresh:4; 263 __BITFIELD_FIELD(uint64_t wbthresh:4,
190 /* Reserved */ 264 /* Reserved */
191 uint64_t reserved2:2; 265 __BITFIELD_FIELD(uint64_t reserved2:2,
192 /* R/W If set, CVMSEG is available for loads/stores in 266 /* R/W If set, CVMSEG is available for loads/stores in
193 * kernel/debug mode. */ 267 * kernel/debug mode. */
194 uint64_t cvmsegenak:1; 268 __BITFIELD_FIELD(uint64_t cvmsegenak:1,
195 /* R/W If set, CVMSEG is available for loads/stores in 269 /* R/W If set, CVMSEG is available for loads/stores in
196 * supervisor mode. */ 270 * supervisor mode. */
197 uint64_t cvmsegenas:1; 271 __BITFIELD_FIELD(uint64_t cvmsegenas:1,
198 /* R/W If set, CVMSEG is available for loads/stores in 272 /* R/W If set, CVMSEG is available for loads/stores in
199 * user mode. */ 273 * user mode. */
200 uint64_t cvmsegenau:1; 274 __BITFIELD_FIELD(uint64_t cvmsegenau:1,
201 /* R/W Size of local memory in cache blocks, 54 (6912 275 /* R/W Size of local memory in cache blocks, 54 (6912
202 * bytes) is max legal value. */ 276 * bytes) is max legal value. */
203 uint64_t lmemsz:6; 277 __BITFIELD_FIELD(uint64_t lmemsz:6,
278 ;)))))))))))))))))))))))))))))))))
204 } s; 279 } s;
205}; 280};
206 281
@@ -224,6 +299,19 @@ static inline void octeon_npi_write32(uint64_t address, uint32_t val)
224 cvmx_read64_uint32(address ^ 4); 299 cvmx_read64_uint32(address ^ 4);
225} 300}
226 301
302/* Octeon multiplier save/restore routines from octeon_switch.S */
303void octeon_mult_save(void);
304void octeon_mult_restore(void);
305void octeon_mult_save_end(void);
306void octeon_mult_restore_end(void);
307void octeon_mult_save3(void);
308void octeon_mult_save3_end(void);
309void octeon_mult_save2(void);
310void octeon_mult_save2_end(void);
311void octeon_mult_restore3(void);
312void octeon_mult_restore3_end(void);
313void octeon_mult_restore2(void);
314void octeon_mult_restore2_end(void);
227 315
228/** 316/**
229 * Read a 32bit value from the Octeon NPI register space 317 * Read a 32bit value from the Octeon NPI register space
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 69529624a005..193b4c6b7541 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -121,6 +121,7 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
121} 121}
122#endif 122#endif
123 123
124#ifdef CONFIG_PCI_DOMAINS
124#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index 125#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
125 126
126static inline int pci_proc_domain(struct pci_bus *bus) 127static inline int pci_proc_domain(struct pci_bus *bus)
@@ -128,6 +129,7 @@ static inline int pci_proc_domain(struct pci_bus *bus)
128 struct pci_controller *hose = bus->sysdata; 129 struct pci_controller *hose = bus->sysdata;
129 return hose->need_domain_info; 130 return hose->need_domain_info;
130} 131}
132#endif /* CONFIG_PCI_DOMAINS */
131 133
132#endif /* __KERNEL__ */ 134#endif /* __KERNEL__ */
133 135
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index fc807aa5ec8d..91747c282bb3 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -35,7 +35,7 @@
35#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 35#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
36 36
37/* 37/*
38 * The following bits are directly used by the TLB hardware 38 * The following bits are implemented by the TLB hardware
39 */ 39 */
40#define _PAGE_GLOBAL_SHIFT 0 40#define _PAGE_GLOBAL_SHIFT 0
41#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) 41#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
@@ -60,43 +60,40 @@
60#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1) 60#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
61#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) 61#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
62 62
63#define _PAGE_SILENT_READ _PAGE_VALID
64#define _PAGE_SILENT_WRITE _PAGE_DIRTY
65
66#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3) 63#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
67 64
68#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) 65#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
69 66
70/* 67/*
71 * The following are implemented by software 68 * The following bits are implemented in software
72 */ 69 */
73#define _PAGE_PRESENT_SHIFT 0 70#define _PAGE_PRESENT_SHIFT (0)
74#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) 71#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
75#define _PAGE_READ_SHIFT 1 72#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
76#define _PAGE_READ (1 << _PAGE_READ_SHIFT) 73#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
77#define _PAGE_WRITE_SHIFT 2 74#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
78#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) 75#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
79#define _PAGE_ACCESSED_SHIFT 3 76#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
80#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT) 77#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
81#define _PAGE_MODIFIED_SHIFT 4 78#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
82#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) 79#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
83 80
84/* 81/*
85 * And these are the hardware TLB bits 82 * The following bits are implemented by the TLB hardware
86 */ 83 */
87#define _PAGE_GLOBAL_SHIFT 8 84#define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 4)
88#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) 85#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
89#define _PAGE_VALID_SHIFT 9 86#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
90#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) 87#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
91#define _PAGE_SILENT_READ (1 << _PAGE_VALID_SHIFT) /* synonym */ 88#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
92#define _PAGE_DIRTY_SHIFT 10
93#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) 89#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
94#define _PAGE_SILENT_WRITE (1 << _PAGE_DIRTY_SHIFT) 90#define _CACHE_UNCACHED_SHIFT (_PAGE_DIRTY_SHIFT + 1)
95#define _CACHE_UNCACHED_SHIFT 11
96#define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT) 91#define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT)
97#define _CACHE_MASK (1 << _CACHE_UNCACHED_SHIFT) 92#define _CACHE_MASK _CACHE_UNCACHED
98 93
99#else /* 'Normal' r4K case */ 94#define _PFN_SHIFT PAGE_SHIFT
95
96#else
100/* 97/*
101 * When using the RI/XI bit support, we have 13 bits of flags below 98 * When using the RI/XI bit support, we have 13 bits of flags below
102 * the physical address. The RI/XI bits are placed such that a SRL 5 99 * the physical address. The RI/XI bits are placed such that a SRL 5
@@ -107,10 +104,8 @@
107 104
108/* 105/*
109 * The following bits are implemented in software 106 * The following bits are implemented in software
110 *
111 * _PAGE_READ / _PAGE_READ_SHIFT should be unused if cpu_has_rixi.
112 */ 107 */
113#define _PAGE_PRESENT_SHIFT (0) 108#define _PAGE_PRESENT_SHIFT 0
114#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) 109#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
115#define _PAGE_READ_SHIFT (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1) 110#define _PAGE_READ_SHIFT (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1)
116#define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; }) 111#define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; })
@@ -125,16 +120,11 @@
125/* huge tlb page */ 120/* huge tlb page */
126#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1) 121#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
127#define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT) 122#define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
128#else
129#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT)
130#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */
131#endif
132
133#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
134/* huge tlb page */
135#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1) 123#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1)
136#define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT) 124#define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT)
137#else 125#else
126#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT)
127#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */
138#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT) 128#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT)
139#define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */ 129#define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */
140#endif 130#endif
@@ -149,17 +139,10 @@
149 139
150#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1) 140#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1)
151#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) 141#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
152
153#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) 142#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
154#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) 143#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
155/* synonym */
156#define _PAGE_SILENT_READ (_PAGE_VALID)
157
158/* The MIPS dirty bit */
159#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1) 144#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
160#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) 145#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
161#define _PAGE_SILENT_WRITE (_PAGE_DIRTY)
162
163#define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1) 146#define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1)
164#define _CACHE_MASK (7 << _CACHE_SHIFT) 147#define _CACHE_MASK (7 << _CACHE_SHIFT)
165 148
@@ -167,9 +150,9 @@
167 150
168#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */ 151#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */
169 152
170#ifndef _PFN_SHIFT 153#define _PAGE_SILENT_READ _PAGE_VALID
171#define _PFN_SHIFT PAGE_SHIFT 154#define _PAGE_SILENT_WRITE _PAGE_DIRTY
172#endif 155
173#define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1)) 156#define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1))
174 157
175#ifndef _PAGE_NO_READ 158#ifndef _PAGE_NO_READ
@@ -179,9 +162,6 @@
179#ifndef _PAGE_NO_EXEC 162#ifndef _PAGE_NO_EXEC
180#define _PAGE_NO_EXEC ({BUG(); 0; }) 163#define _PAGE_NO_EXEC ({BUG(); 0; })
181#endif 164#endif
182#ifndef _PAGE_GLOBAL_SHIFT
183#define _PAGE_GLOBAL_SHIFT ilog2(_PAGE_GLOBAL)
184#endif
185 165
186 166
187#ifndef __ASSEMBLY__ 167#ifndef __ASSEMBLY__
@@ -266,8 +246,9 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
266#endif 246#endif
267 247
268#define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ)) 248#define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ))
269#define __WRITEABLE (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED) 249#define __WRITEABLE (_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)
270 250
271#define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK) 251#define _PAGE_CHG_MASK (_PAGE_ACCESSED | _PAGE_MODIFIED | \
252 _PFN_MASK | _CACHE_MASK)
272 253
273#endif /* _ASM_PGTABLE_BITS_H */ 254#endif /* _ASM_PGTABLE_BITS_H */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 583ff4215479..bef782c4a44b 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -99,29 +99,35 @@ extern void paging_init(void);
99 99
100#define htw_stop() \ 100#define htw_stop() \
101do { \ 101do { \
102 if (cpu_has_htw) \ 102 unsigned long flags; \
103 write_c0_pwctl(read_c0_pwctl() & \ 103 \
104 ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \ 104 if (cpu_has_htw) { \
105 local_irq_save(flags); \
106 if (!raw_current_cpu_data.htw_seq++) { \
107 write_c0_pwctl(read_c0_pwctl() & \
108 ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \
109 back_to_back_c0_hazard(); \
110 } \
111 local_irq_restore(flags); \
112 } \
105} while(0) 113} while(0)
106 114
107#define htw_start() \ 115#define htw_start() \
108do { \ 116do { \
109 if (cpu_has_htw) \ 117 unsigned long flags; \
110 write_c0_pwctl(read_c0_pwctl() | \ 118 \
111 (1 << MIPS_PWCTL_PWEN_SHIFT)); \
112} while(0)
113
114
115#define htw_reset() \
116do { \
117 if (cpu_has_htw) { \ 119 if (cpu_has_htw) { \
118 htw_stop(); \ 120 local_irq_save(flags); \
119 back_to_back_c0_hazard(); \ 121 if (!--raw_current_cpu_data.htw_seq) { \
120 htw_start(); \ 122 write_c0_pwctl(read_c0_pwctl() | \
121 back_to_back_c0_hazard(); \ 123 (1 << MIPS_PWCTL_PWEN_SHIFT)); \
124 back_to_back_c0_hazard(); \
125 } \
126 local_irq_restore(flags); \
122 } \ 127 } \
123} while(0) 128} while(0)
124 129
130
125extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, 131extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
126 pte_t pteval); 132 pte_t pteval);
127 133
@@ -153,12 +159,13 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
153{ 159{
154 pte_t null = __pte(0); 160 pte_t null = __pte(0);
155 161
162 htw_stop();
156 /* Preserve global status for the pair */ 163 /* Preserve global status for the pair */
157 if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL) 164 if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
158 null.pte_low = null.pte_high = _PAGE_GLOBAL; 165 null.pte_low = null.pte_high = _PAGE_GLOBAL;
159 166
160 set_pte_at(mm, addr, ptep, null); 167 set_pte_at(mm, addr, ptep, null);
161 htw_reset(); 168 htw_start();
162} 169}
163#else 170#else
164 171
@@ -188,6 +195,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
188 195
189static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 196static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
190{ 197{
198 htw_stop();
191#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX) 199#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
192 /* Preserve global status for the pair */ 200 /* Preserve global status for the pair */
193 if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) 201 if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
@@ -195,7 +203,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
195 else 203 else
196#endif 204#endif
197 set_pte_at(mm, addr, ptep, __pte(0)); 205 set_pte_at(mm, addr, ptep, __pte(0));
198 htw_reset(); 206 htw_start();
199} 207}
200#endif 208#endif
201 209
@@ -334,7 +342,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
334 return pte; 342 return pte;
335} 343}
336 344
337#ifdef _PAGE_HUGE 345#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
338static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; } 346static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; }
339 347
340static inline pte_t pte_mkhuge(pte_t pte) 348static inline pte_t pte_mkhuge(pte_t pte)
@@ -342,7 +350,7 @@ static inline pte_t pte_mkhuge(pte_t pte)
342 pte_val(pte) |= _PAGE_HUGE; 350 pte_val(pte) |= _PAGE_HUGE;
343 return pte; 351 return pte;
344} 352}
345#endif /* _PAGE_HUGE */ 353#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
346#endif 354#endif
347static inline int pte_special(pte_t pte) { return 0; } 355static inline int pte_special(pte_t pte) { return 0; }
348static inline pte_t pte_mkspecial(pte_t pte) { return pte; } 356static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index f1df4cb4a286..b5dcbee01fd7 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -54,9 +54,7 @@ extern unsigned int vced_count, vcei_count;
54#define TASK_SIZE 0x7fff8000UL 54#define TASK_SIZE 0x7fff8000UL
55#endif 55#endif
56 56
57#ifdef __KERNEL__
58#define STACK_TOP_MAX TASK_SIZE 57#define STACK_TOP_MAX TASK_SIZE
59#endif
60 58
61#define TASK_IS_32BIT_ADDR 1 59#define TASK_IS_32BIT_ADDR 1
62 60
@@ -73,11 +71,7 @@ extern unsigned int vced_count, vcei_count;
73#define TASK_SIZE32 0x7fff8000UL 71#define TASK_SIZE32 0x7fff8000UL
74#define TASK_SIZE64 0x10000000000UL 72#define TASK_SIZE64 0x10000000000UL
75#define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64) 73#define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
76
77#ifdef __KERNEL__
78#define STACK_TOP_MAX TASK_SIZE64 74#define STACK_TOP_MAX TASK_SIZE64
79#endif
80
81 75
82#define TASK_SIZE_OF(tsk) \ 76#define TASK_SIZE_OF(tsk) \
83 (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64) 77 (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
@@ -211,6 +205,8 @@ struct octeon_cop2_state {
211 unsigned long cop2_gfm_poly; 205 unsigned long cop2_gfm_poly;
212 /* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */ 206 /* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */
213 unsigned long cop2_gfm_result[2]; 207 unsigned long cop2_gfm_result[2];
208 /* DMFC2 rt, 0x24F, DMFC2 rt, 0x50, OCTEON III */
209 unsigned long cop2_sha3[2];
214}; 210};
215#define COP2_INIT \ 211#define COP2_INIT \
216 .cp2 = {0,}, 212 .cp2 = {0,},
@@ -399,4 +395,15 @@ unsigned long get_wchan(struct task_struct *p);
399 395
400#endif 396#endif
401 397
398/*
399 * Functions & macros implementing the PR_GET_FP_MODE & PR_SET_FP_MODE options
400 * to the prctl syscall.
401 */
402extern int mips_get_process_fp_mode(struct task_struct *task);
403extern int mips_set_process_fp_mode(struct task_struct *task,
404 unsigned int value);
405
406#define GET_FP_MODE(task) mips_get_process_fp_mode(task)
407#define SET_FP_MODE(task,value) mips_set_process_fp_mode(task, value)
408
402#endif /* _ASM_PROCESSOR_H */ 409#endif /* _ASM_PROCESSOR_H */
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
index eaa26270a5e5..8ebc2aa5f3e1 100644
--- a/arch/mips/include/asm/prom.h
+++ b/arch/mips/include/asm/prom.h
@@ -24,13 +24,6 @@ struct boot_param_header;
24extern void __dt_setup_arch(void *bph); 24extern void __dt_setup_arch(void *bph);
25extern int __dt_register_buses(const char *bus0, const char *bus1); 25extern int __dt_register_buses(const char *bus0, const char *bus1);
26 26
27#define dt_setup_arch(sym) \
28({ \
29 extern char __dtb_##sym##_begin[]; \
30 \
31 __dt_setup_arch(__dtb_##sym##_begin); \
32})
33
34#else /* CONFIG_OF */ 27#else /* CONFIG_OF */
35static inline void device_tree_init(void) { } 28static inline void device_tree_init(void) { }
36#endif /* CONFIG_OF */ 29#endif /* CONFIG_OF */
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index fc783f843bdc..ffc320389f40 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -40,8 +40,8 @@ struct pt_regs {
40 unsigned long cp0_cause; 40 unsigned long cp0_cause;
41 unsigned long cp0_epc; 41 unsigned long cp0_epc;
42#ifdef CONFIG_CPU_CAVIUM_OCTEON 42#ifdef CONFIG_CPU_CAVIUM_OCTEON
43 unsigned long long mpl[3]; /* MTM{0,1,2} */ 43 unsigned long long mpl[6]; /* MTM{0-5} */
44 unsigned long long mtp[3]; /* MTP{0,1,2} */ 44 unsigned long long mtp[6]; /* MTP{0-5} */
45#endif 45#endif
46} __aligned(8); 46} __aligned(8);
47 47
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index e293a8d89a6d..1b22d2da88a1 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -14,6 +14,7 @@
14 14
15#include <asm/asm.h> 15#include <asm/asm.h>
16#include <asm/cacheops.h> 16#include <asm/cacheops.h>
17#include <asm/compiler.h>
17#include <asm/cpu-features.h> 18#include <asm/cpu-features.h>
18#include <asm/cpu-type.h> 19#include <asm/cpu-type.h>
19#include <asm/mipsmtregs.h> 20#include <asm/mipsmtregs.h>
@@ -39,7 +40,7 @@ extern void (*r4k_blast_icache)(void);
39 __asm__ __volatile__( \ 40 __asm__ __volatile__( \
40 " .set push \n" \ 41 " .set push \n" \
41 " .set noreorder \n" \ 42 " .set noreorder \n" \
42 " .set arch=r4000 \n" \ 43 " .set "MIPS_ISA_ARCH_LEVEL" \n" \
43 " cache %0, %1 \n" \ 44 " cache %0, %1 \n" \
44 " .set pop \n" \ 45 " .set pop \n" \
45 : \ 46 : \
@@ -147,7 +148,7 @@ static inline void flush_scache_line(unsigned long addr)
147 __asm__ __volatile__( \ 148 __asm__ __volatile__( \
148 " .set push \n" \ 149 " .set push \n" \
149 " .set noreorder \n" \ 150 " .set noreorder \n" \
150 " .set arch=r4000 \n" \ 151 " .set "MIPS_ISA_ARCH_LEVEL" \n" \
151 "1: cache %0, (%1) \n" \ 152 "1: cache %0, (%1) \n" \
152 "2: .set pop \n" \ 153 "2: .set pop \n" \
153 " .section __ex_table,\"a\" \n" \ 154 " .section __ex_table,\"a\" \n" \
@@ -218,6 +219,7 @@ static inline void invalidate_tcache_page(unsigned long addr)
218 cache_op(Page_Invalidate_T, addr); 219 cache_op(Page_Invalidate_T, addr);
219} 220}
220 221
222#ifndef CONFIG_CPU_MIPSR6
221#define cache16_unroll32(base,op) \ 223#define cache16_unroll32(base,op) \
222 __asm__ __volatile__( \ 224 __asm__ __volatile__( \
223 " .set push \n" \ 225 " .set push \n" \
@@ -322,6 +324,147 @@ static inline void invalidate_tcache_page(unsigned long addr)
322 : "r" (base), \ 324 : "r" (base), \
323 "i" (op)); 325 "i" (op));
324 326
327#else
328/*
329 * MIPS R6 changed the cache opcode and moved to an 8-bit offset field.
330 * This means we now need to increment the base register before we flush
331 * more cache lines.
332 */
333#define cache16_unroll32(base,op) \
334 __asm__ __volatile__( \
335 " .set push\n" \
336 " .set noreorder\n" \
337 " .set mips64r6\n" \
338 " .set noat\n" \
339 " cache %1, 0x000(%0); cache %1, 0x010(%0)\n" \
340 " cache %1, 0x020(%0); cache %1, 0x030(%0)\n" \
341 " cache %1, 0x040(%0); cache %1, 0x050(%0)\n" \
342 " cache %1, 0x060(%0); cache %1, 0x070(%0)\n" \
343 " cache %1, 0x080(%0); cache %1, 0x090(%0)\n" \
344 " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \
345 " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \
346 " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \
347 " addiu $1, $0, 0x100 \n" \
348 " cache %1, 0x000($1); cache %1, 0x010($1)\n" \
349 " cache %1, 0x020($1); cache %1, 0x030($1)\n" \
350 " cache %1, 0x040($1); cache %1, 0x050($1)\n" \
351 " cache %1, 0x060($1); cache %1, 0x070($1)\n" \
352 " cache %1, 0x080($1); cache %1, 0x090($1)\n" \
353 " cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n" \
354 " cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n" \
355 " cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n" \
356 " .set pop\n" \
357 : \
358 : "r" (base), \
359 "i" (op));
360
361#define cache32_unroll32(base,op) \
362 __asm__ __volatile__( \
363 " .set push\n" \
364 " .set noreorder\n" \
365 " .set mips64r6\n" \
366 " .set noat\n" \
367 " cache %1, 0x000(%0); cache %1, 0x020(%0)\n" \
368 " cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \
369 " cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \
370 " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \
371 " addiu $1, %0, 0x100\n" \
372 " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
373 " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
374 " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
375 " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
376 " addiu $1, $1, 0x100\n" \
377 " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
378 " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
379 " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
380 " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
381 " addiu $1, $1, 0x100\n" \
382 " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
383 " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
384 " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
385 " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
386 " .set pop\n" \
387 : \
388 : "r" (base), \
389 "i" (op));
390
391#define cache64_unroll32(base,op) \
392 __asm__ __volatile__( \
393 " .set push\n" \
394 " .set noreorder\n" \
395 " .set mips64r6\n" \
396 " .set noat\n" \
397 " cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \
398 " cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \
399 " addiu $1, %0, 0x100\n" \
400 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
401 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
402 " addiu $1, %0, 0x100\n" \
403 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
404 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
405 " addiu $1, %0, 0x100\n" \
406 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
407 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
408 " addiu $1, %0, 0x100\n" \
409 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
410 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
411 " addiu $1, %0, 0x100\n" \
412 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
413 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
414 " addiu $1, %0, 0x100\n" \
415 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
416 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
417 " addiu $1, %0, 0x100\n" \
418 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
419 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
420 " .set pop\n" \
421 : \
422 : "r" (base), \
423 "i" (op));
424
425#define cache128_unroll32(base,op) \
426 __asm__ __volatile__( \
427 " .set push\n" \
428 " .set noreorder\n" \
429 " .set mips64r6\n" \
430 " .set noat\n" \
431 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
432 " addiu $1, %0, 0x100\n" \
433 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
434 " addiu $1, %0, 0x100\n" \
435 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
436 " addiu $1, %0, 0x100\n" \
437 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
438 " addiu $1, %0, 0x100\n" \
439 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
440 " addiu $1, %0, 0x100\n" \
441 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
442 " addiu $1, %0, 0x100\n" \
443 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
444 " addiu $1, %0, 0x100\n" \
445 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
446 " addiu $1, %0, 0x100\n" \
447 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
448 " addiu $1, %0, 0x100\n" \
449 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
450 " addiu $1, %0, 0x100\n" \
451 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
452 " addiu $1, %0, 0x100\n" \
453 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
454 " addiu $1, %0, 0x100\n" \
455 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
456 " addiu $1, %0, 0x100\n" \
457 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
458 " addiu $1, %0, 0x100\n" \
459 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
460 " addiu $1, %0, 0x100\n" \
461 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
462 " addiu $1, %0, 0x100\n" \
463 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
464 " addiu $1, %0, 0x100\n" \
465 " .set pop\n" \
463 : \
464 : "r" (base), \
465 "i" (op));
466#endif /* CONFIG_CPU_MIPSR6 */
467
325/* 468/*
326 * Perform the cache operation specified by op using a user mode virtual 469 * Perform the cache operation specified by op using a user mode virtual
327 * address while in kernel mode. 470 * address while in kernel mode.
diff --git a/arch/mips/include/asm/sgialib.h b/arch/mips/include/asm/sgialib.h
index 753275accd18..195db5045ae5 100644
--- a/arch/mips/include/asm/sgialib.h
+++ b/arch/mips/include/asm/sgialib.h
@@ -11,6 +11,7 @@
11#ifndef _ASM_SGIALIB_H 11#ifndef _ASM_SGIALIB_H
12#define _ASM_SGIALIB_H 12#define _ASM_SGIALIB_H
13 13
14#include <linux/compiler.h>
14#include <asm/sgiarcs.h> 15#include <asm/sgiarcs.h>
15 16
16extern struct linux_romvec *romvec; 17extern struct linux_romvec *romvec;
@@ -70,8 +71,11 @@ extern LONG ArcRead(ULONG fd, PVOID buf, ULONG num, PULONG cnt);
70extern LONG ArcWrite(ULONG fd, PVOID buf, ULONG num, PULONG cnt); 71extern LONG ArcWrite(ULONG fd, PVOID buf, ULONG num, PULONG cnt);
71 72
72/* Misc. routines. */ 73/* Misc. routines. */
73extern VOID ArcReboot(VOID) __attribute__((noreturn)); 74extern VOID ArcHalt(VOID) __noreturn;
74extern VOID ArcEnterInteractiveMode(VOID) __attribute__((noreturn)); 75extern VOID ArcPowerDown(VOID) __noreturn;
76extern VOID ArcRestart(VOID) __noreturn;
77extern VOID ArcReboot(VOID) __noreturn;
78extern VOID ArcEnterInteractiveMode(VOID) __noreturn;
75extern VOID ArcFlushAllCaches(VOID); 79extern VOID ArcFlushAllCaches(VOID);
76extern DISPLAY_STATUS *ArcGetDisplayStatus(ULONG FileID); 80extern DISPLAY_STATUS *ArcGetDisplayStatus(ULONG FileID);
77 81
diff --git a/arch/mips/include/asm/siginfo.h b/arch/mips/include/asm/siginfo.h
deleted file mode 100644
index dd9a762646fc..000000000000
--- a/arch/mips/include/asm/siginfo.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1998, 1999, 2001, 2003 Ralf Baechle
7 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
8 */
9#ifndef _ASM_SIGINFO_H
10#define _ASM_SIGINFO_H
11
12#include <uapi/asm/siginfo.h>
13
14
15/*
16 * Duplicated here because of <asm-generic/siginfo.h> braindamage ...
17 */
18#include <linux/string.h>
19
20static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
21{
22 if (from->si_code < 0)
23 memcpy(to, from, sizeof(*to));
24 else
25 /* _sigchld is currently the largest known union member */
26 memcpy(to, from, 3*sizeof(int) + sizeof(from->_sifields._sigchld));
27}
28
29#endif /* _ASM_SIGINFO_H */
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index c6d06d383ef9..b4548690ade9 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -89,7 +89,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
89 " subu %[ticket], %[ticket], 1 \n" 89 " subu %[ticket], %[ticket], 1 \n"
90 " .previous \n" 90 " .previous \n"
91 " .set pop \n" 91 " .set pop \n"
92 : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), 92 : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
93 [serving_now_ptr] "+m" (lock->h.serving_now), 93 [serving_now_ptr] "+m" (lock->h.serving_now),
94 [ticket] "=&r" (tmp), 94 [ticket] "=&r" (tmp),
95 [my_ticket] "=&r" (my_ticket) 95 [my_ticket] "=&r" (my_ticket)
@@ -122,7 +122,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
122 " subu %[ticket], %[ticket], 1 \n" 122 " subu %[ticket], %[ticket], 1 \n"
123 " .previous \n" 123 " .previous \n"
124 " .set pop \n" 124 " .set pop \n"
125 : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), 125 : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
126 [serving_now_ptr] "+m" (lock->h.serving_now), 126 [serving_now_ptr] "+m" (lock->h.serving_now),
127 [ticket] "=&r" (tmp), 127 [ticket] "=&r" (tmp),
128 [my_ticket] "=&r" (my_ticket) 128 [my_ticket] "=&r" (my_ticket)
@@ -164,7 +164,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
164 " li %[ticket], 0 \n" 164 " li %[ticket], 0 \n"
165 " .previous \n" 165 " .previous \n"
166 " .set pop \n" 166 " .set pop \n"
167 : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), 167 : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
168 [ticket] "=&r" (tmp), 168 [ticket] "=&r" (tmp),
169 [my_ticket] "=&r" (tmp2), 169 [my_ticket] "=&r" (tmp2),
170 [now_serving] "=&r" (tmp3) 170 [now_serving] "=&r" (tmp3)
@@ -188,7 +188,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
188 " li %[ticket], 0 \n" 188 " li %[ticket], 0 \n"
189 " .previous \n" 189 " .previous \n"
190 " .set pop \n" 190 " .set pop \n"
191 : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), 191 : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
192 [ticket] "=&r" (tmp), 192 [ticket] "=&r" (tmp),
193 [my_ticket] "=&r" (tmp2), 193 [my_ticket] "=&r" (tmp2),
194 [now_serving] "=&r" (tmp3) 194 [now_serving] "=&r" (tmp3)
@@ -235,8 +235,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
235 " beqzl %1, 1b \n" 235 " beqzl %1, 1b \n"
236 " nop \n" 236 " nop \n"
237 " .set reorder \n" 237 " .set reorder \n"
238 : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 238 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
239 : GCC_OFF12_ASM() (rw->lock) 239 : GCC_OFF_SMALL_ASM() (rw->lock)
240 : "memory"); 240 : "memory");
241 } else { 241 } else {
242 do { 242 do {
@@ -245,8 +245,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
245 " bltz %1, 1b \n" 245 " bltz %1, 1b \n"
246 " addu %1, 1 \n" 246 " addu %1, 1 \n"
247 "2: sc %1, %0 \n" 247 "2: sc %1, %0 \n"
248 : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 248 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
249 : GCC_OFF12_ASM() (rw->lock) 249 : GCC_OFF_SMALL_ASM() (rw->lock)
250 : "memory"); 250 : "memory");
251 } while (unlikely(!tmp)); 251 } while (unlikely(!tmp));
252 } 252 }
@@ -254,9 +254,6 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
254 smp_llsc_mb(); 254 smp_llsc_mb();
255} 255}
256 256
257/* Note the use of sub, not subu which will make the kernel die with an
258 overflow exception if we ever try to unlock an rwlock that is already
259 unlocked or is being held by a writer. */
260static inline void arch_read_unlock(arch_rwlock_t *rw) 257static inline void arch_read_unlock(arch_rwlock_t *rw)
261{ 258{
262 unsigned int tmp; 259 unsigned int tmp;
@@ -266,20 +263,20 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
266 if (R10000_LLSC_WAR) { 263 if (R10000_LLSC_WAR) {
267 __asm__ __volatile__( 264 __asm__ __volatile__(
268 "1: ll %1, %2 # arch_read_unlock \n" 265 "1: ll %1, %2 # arch_read_unlock \n"
269 " sub %1, 1 \n" 266 " addiu %1, 1 \n"
270 " sc %1, %0 \n" 267 " sc %1, %0 \n"
271 " beqzl %1, 1b \n" 268 " beqzl %1, 1b \n"
272 : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 269 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
273 : GCC_OFF12_ASM() (rw->lock) 270 : GCC_OFF_SMALL_ASM() (rw->lock)
274 : "memory"); 271 : "memory");
275 } else { 272 } else {
276 do { 273 do {
277 __asm__ __volatile__( 274 __asm__ __volatile__(
278 "1: ll %1, %2 # arch_read_unlock \n" 275 "1: ll %1, %2 # arch_read_unlock \n"
279 " sub %1, 1 \n" 276 " addiu %1, -1 \n"
280 " sc %1, %0 \n" 277 " sc %1, %0 \n"
281 : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 278 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
282 : GCC_OFF12_ASM() (rw->lock) 279 : GCC_OFF_SMALL_ASM() (rw->lock)
283 : "memory"); 280 : "memory");
284 } while (unlikely(!tmp)); 281 } while (unlikely(!tmp));
285 } 282 }
@@ -299,8 +296,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
299 " beqzl %1, 1b \n" 296 " beqzl %1, 1b \n"
300 " nop \n" 297 " nop \n"
301 " .set reorder \n" 298 " .set reorder \n"
302 : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 299 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
303 : GCC_OFF12_ASM() (rw->lock) 300 : GCC_OFF_SMALL_ASM() (rw->lock)
304 : "memory"); 301 : "memory");
305 } else { 302 } else {
306 do { 303 do {
@@ -309,8 +306,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
309 " bnez %1, 1b \n" 306 " bnez %1, 1b \n"
310 " lui %1, 0x8000 \n" 307 " lui %1, 0x8000 \n"
311 "2: sc %1, %0 \n" 308 "2: sc %1, %0 \n"
312 : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 309 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
313 : GCC_OFF12_ASM() (rw->lock) 310 : GCC_OFF_SMALL_ASM() (rw->lock)
314 : "memory"); 311 : "memory");
315 } while (unlikely(!tmp)); 312 } while (unlikely(!tmp));
316 } 313 }
@@ -349,8 +346,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
349 __WEAK_LLSC_MB 346 __WEAK_LLSC_MB
350 " li %2, 1 \n" 347 " li %2, 1 \n"
351 "2: \n" 348 "2: \n"
352 : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 349 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
353 : GCC_OFF12_ASM() (rw->lock) 350 : GCC_OFF_SMALL_ASM() (rw->lock)
354 : "memory"); 351 : "memory");
355 } else { 352 } else {
356 __asm__ __volatile__( 353 __asm__ __volatile__(
@@ -366,8 +363,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
366 __WEAK_LLSC_MB 363 __WEAK_LLSC_MB
367 " li %2, 1 \n" 364 " li %2, 1 \n"
368 "2: \n" 365 "2: \n"
369 : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 366 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
370 : GCC_OFF12_ASM() (rw->lock) 367 : GCC_OFF_SMALL_ASM() (rw->lock)
371 : "memory"); 368 : "memory");
372 } 369 }
373 370
@@ -393,8 +390,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
393 " li %2, 1 \n" 390 " li %2, 1 \n"
394 " .set reorder \n" 391 " .set reorder \n"
395 "2: \n" 392 "2: \n"
396 : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 393 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
397 : GCC_OFF12_ASM() (rw->lock) 394 : GCC_OFF_SMALL_ASM() (rw->lock)
398 : "memory"); 395 : "memory");
399 } else { 396 } else {
400 do { 397 do {
@@ -406,9 +403,9 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
406 " sc %1, %0 \n" 403 " sc %1, %0 \n"
407 " li %2, 1 \n" 404 " li %2, 1 \n"
408 "2: \n" 405 "2: \n"
409 : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), 406 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
410 "=&r" (ret) 407 "=&r" (ret)
411 : GCC_OFF12_ASM() (rw->lock) 408 : GCC_OFF_SMALL_ASM() (rw->lock)
412 : "memory"); 409 : "memory");
413 } while (unlikely(!tmp)); 410 } while (unlikely(!tmp));
414 411
diff --git a/arch/mips/include/asm/spram.h b/arch/mips/include/asm/spram.h
index 0b89006e4907..0f90d88e464d 100644
--- a/arch/mips/include/asm/spram.h
+++ b/arch/mips/include/asm/spram.h
@@ -1,10 +1,10 @@
1#ifndef _MIPS_SPRAM_H 1#ifndef _MIPS_SPRAM_H
2#define _MIPS_SPRAM_H 2#define _MIPS_SPRAM_H
3 3
4#ifdef CONFIG_CPU_MIPSR2 4#if defined(CONFIG_MIPS_SPRAM)
5extern __init void spram_config(void); 5extern __init void spram_config(void);
6#else 6#else
7static inline void spram_config(void) { }; 7static inline void spram_config(void) { };
8#endif /* CONFIG_CPU_MIPSR2 */ 8#endif /* CONFIG_MIPS_SPRAM */
9 9
10#endif /* _MIPS_SPRAM_H */ 10#endif /* _MIPS_SPRAM_H */
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index b188c797565c..28d6d9364bd1 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -40,7 +40,7 @@
40 LONG_S v1, PT_HI(sp) 40 LONG_S v1, PT_HI(sp)
41 mflhxu v1 41 mflhxu v1
42 LONG_S v1, PT_ACX(sp) 42 LONG_S v1, PT_ACX(sp)
43#else 43#elif !defined(CONFIG_CPU_MIPSR6)
44 mfhi v1 44 mfhi v1
45#endif 45#endif
46#ifdef CONFIG_32BIT 46#ifdef CONFIG_32BIT
@@ -50,7 +50,7 @@
50 LONG_S $10, PT_R10(sp) 50 LONG_S $10, PT_R10(sp)
51 LONG_S $11, PT_R11(sp) 51 LONG_S $11, PT_R11(sp)
52 LONG_S $12, PT_R12(sp) 52 LONG_S $12, PT_R12(sp)
53#ifndef CONFIG_CPU_HAS_SMARTMIPS 53#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
54 LONG_S v1, PT_HI(sp) 54 LONG_S v1, PT_HI(sp)
55 mflo v1 55 mflo v1
56#endif 56#endif
@@ -58,7 +58,7 @@
58 LONG_S $14, PT_R14(sp) 58 LONG_S $14, PT_R14(sp)
59 LONG_S $15, PT_R15(sp) 59 LONG_S $15, PT_R15(sp)
60 LONG_S $24, PT_R24(sp) 60 LONG_S $24, PT_R24(sp)
61#ifndef CONFIG_CPU_HAS_SMARTMIPS 61#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
62 LONG_S v1, PT_LO(sp) 62 LONG_S v1, PT_LO(sp)
63#endif 63#endif
64#ifdef CONFIG_CPU_CAVIUM_OCTEON 64#ifdef CONFIG_CPU_CAVIUM_OCTEON
@@ -226,7 +226,7 @@
226 mtlhx $24 226 mtlhx $24
227 LONG_L $24, PT_LO(sp) 227 LONG_L $24, PT_LO(sp)
228 mtlhx $24 228 mtlhx $24
229#else 229#elif !defined(CONFIG_CPU_MIPSR6)
230 LONG_L $24, PT_LO(sp) 230 LONG_L $24, PT_LO(sp)
231 mtlo $24 231 mtlo $24
232 LONG_L $24, PT_HI(sp) 232 LONG_L $24, PT_HI(sp)
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index b928b6f898cd..e92d6c4b5ed1 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -75,9 +75,12 @@ do { \
75#endif 75#endif
76 76
77#define __clear_software_ll_bit() \ 77#define __clear_software_ll_bit() \
78do { \ 78do { if (cpu_has_rw_llb) { \
79 if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc) \ 79 write_c0_lladdr(0); \
80 ll_bit = 0; \ 80 } else { \
81 if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)\
82 ll_bit = 0; \
83 } \
81} while (0) 84} while (0)
82 85
83#define switch_to(prev, next, last) \ 86#define switch_to(prev, next, last) \
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 9e1295f874f0..55ed6602204c 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -28,7 +28,7 @@ struct thread_info {
28 unsigned long tp_value; /* thread pointer */ 28 unsigned long tp_value; /* thread pointer */
29 __u32 cpu; /* current CPU */ 29 __u32 cpu; /* current CPU */
30 int preempt_count; /* 0 => preemptable, <0 => BUG */ 30 int preempt_count; /* 0 => preemptable, <0 => BUG */
31 31 int r2_emul_return; /* 1 => Returning from R2 emulator */
32 mm_segment_t addr_limit; /* 32 mm_segment_t addr_limit; /*
33 * thread address space limit: 33 * thread address space limit:
34 * 0x7fffffff for user-thread 34 * 0x7fffffff for user-thread
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 89c22433b1c6..fc0cf5ac0cf7 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -21,20 +21,20 @@
21enum major_op { 21enum major_op {
22 spec_op, bcond_op, j_op, jal_op, 22 spec_op, bcond_op, j_op, jal_op,
23 beq_op, bne_op, blez_op, bgtz_op, 23 beq_op, bne_op, blez_op, bgtz_op,
24 addi_op, addiu_op, slti_op, sltiu_op, 24 addi_op, cbcond0_op = addi_op, addiu_op, slti_op, sltiu_op,
25 andi_op, ori_op, xori_op, lui_op, 25 andi_op, ori_op, xori_op, lui_op,
26 cop0_op, cop1_op, cop2_op, cop1x_op, 26 cop0_op, cop1_op, cop2_op, cop1x_op,
27 beql_op, bnel_op, blezl_op, bgtzl_op, 27 beql_op, bnel_op, blezl_op, bgtzl_op,
28 daddi_op, daddiu_op, ldl_op, ldr_op, 28 daddi_op, cbcond1_op = daddi_op, daddiu_op, ldl_op, ldr_op,
29 spec2_op, jalx_op, mdmx_op, spec3_op, 29 spec2_op, jalx_op, mdmx_op, spec3_op,
30 lb_op, lh_op, lwl_op, lw_op, 30 lb_op, lh_op, lwl_op, lw_op,
31 lbu_op, lhu_op, lwr_op, lwu_op, 31 lbu_op, lhu_op, lwr_op, lwu_op,
32 sb_op, sh_op, swl_op, sw_op, 32 sb_op, sh_op, swl_op, sw_op,
33 sdl_op, sdr_op, swr_op, cache_op, 33 sdl_op, sdr_op, swr_op, cache_op,
34 ll_op, lwc1_op, lwc2_op, pref_op, 34 ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op,
35 lld_op, ldc1_op, ldc2_op, ld_op, 35 lld_op, ldc1_op, ldc2_op, beqzcjic_op = ldc2_op, ld_op,
36 sc_op, swc1_op, swc2_op, major_3b_op, 36 sc_op, swc1_op, swc2_op, balc6_op = swc2_op, major_3b_op,
37 scd_op, sdc1_op, sdc2_op, sd_op 37 scd_op, sdc1_op, sdc2_op, bnezcjialc_op = sdc2_op, sd_op
38}; 38};
39 39
40/* 40/*
@@ -83,9 +83,12 @@ enum spec3_op {
83 swe_op = 0x1f, bshfl_op = 0x20, 83 swe_op = 0x1f, bshfl_op = 0x20,
84 swle_op = 0x21, swre_op = 0x22, 84 swle_op = 0x21, swre_op = 0x22,
85 prefe_op = 0x23, dbshfl_op = 0x24, 85 prefe_op = 0x23, dbshfl_op = 0x24,
86 lbue_op = 0x28, lhue_op = 0x29, 86 cache6_op = 0x25, sc6_op = 0x26,
87 lbe_op = 0x2c, lhe_op = 0x2d, 87 scd6_op = 0x27, lbue_op = 0x28,
88 lle_op = 0x2e, lwe_op = 0x2f, 88 lhue_op = 0x29, lbe_op = 0x2c,
89 lhe_op = 0x2d, lle_op = 0x2e,
90 lwe_op = 0x2f, pref6_op = 0x35,
91 ll6_op = 0x36, lld6_op = 0x37,
89 rdhwr_op = 0x3b 92 rdhwr_op = 0x3b
90}; 93};
91 94
@@ -112,7 +115,8 @@ enum cop_op {
112 mfhc_op = 0x03, mtc_op = 0x04, 115 mfhc_op = 0x03, mtc_op = 0x04,
113 dmtc_op = 0x05, ctc_op = 0x06, 116 dmtc_op = 0x05, ctc_op = 0x06,
114 mthc0_op = 0x06, mthc_op = 0x07, 117 mthc0_op = 0x06, mthc_op = 0x07,
115 bc_op = 0x08, cop_op = 0x10, 118 bc_op = 0x08, bc1eqz_op = 0x09,
119 bc1nez_op = 0x0d, cop_op = 0x10,
116 copm_op = 0x18 120 copm_op = 0x18
117}; 121};
118 122
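
Because R6 reuses existing major opcodes for its compact branches, the enum above gains aliases (cbcond0_op = addi_op, bc6_op = lwc2_op, and so on) rather than new values. A small sketch of how one decoder switch can then serve both ISAs; the value 8 for addi_op follows from the enum order, and is_r6 is a made-up flag:

    #include <stdio.h>

    enum { addi_op = 8, cbcond0_op = addi_op };  /* mirrors the aliasing */

    static void decode(unsigned int op, int is_r6)
    {
        switch (op) {
        case cbcond0_op:     /* identical encoding to addi_op */
            puts(is_r6 ? "R6: BOVC/BEQC/BEQZALC family"
                       : "pre-R6: ADDI");
            break;
        default:
            puts("some other major opcode");
        }
    }

    int main(void)
    {
        decode(8, 0);
        decode(8, 1);
        return 0;
    }
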
diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
index d08f83f19db5..2cb7fdead570 100644
--- a/arch/mips/include/uapi/asm/siginfo.h
+++ b/arch/mips/include/uapi/asm/siginfo.h
@@ -16,13 +16,6 @@
16#define HAVE_ARCH_SIGINFO_T 16#define HAVE_ARCH_SIGINFO_T
17 17
18/* 18/*
19 * We duplicate the generic versions - <asm-generic/siginfo.h> is just borked
20 * by design ...
21 */
22#define HAVE_ARCH_COPY_SIGINFO
23struct siginfo;
24
25/*
26 * Careful to keep union _sifields from shifting ... 19 * Careful to keep union _sifields from shifting ...
27 */ 20 */
28#if _MIPS_SZLONG == 32 21#if _MIPS_SZLONG == 32
@@ -35,8 +28,9 @@ struct siginfo;
35 28
36#define __ARCH_SIGSYS 29#define __ARCH_SIGSYS
37 30
38#include <asm-generic/siginfo.h> 31#include <uapi/asm-generic/siginfo.h>
39 32
33/* We can't use generic siginfo_t, because our si_code and si_errno are swapped */
40typedef struct siginfo { 34typedef struct siginfo {
41 int si_signo; 35 int si_signo;
42 int si_code; 36 int si_code;
@@ -124,5 +118,6 @@ typedef struct siginfo {
124#define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */ 118#define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */
125#define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */ 119#define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */
126 120
121#include <asm-generic/siginfo.h>
127 122
128#endif /* _UAPI_ASM_SIGINFO_H */ 123#endif /* _UAPI_ASM_SIGINFO_H */
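
The reshuffle above matters because the MIPS ABI places si_code before si_errno, the reverse of the generic layout, so the header must keep its own siginfo and pull in the generic header only for the si_code macros at the end. An illustrative, non-kernel comparison of the two leading layouts:

    #include <stddef.h>
    #include <stdio.h>

    struct generic_head { int si_signo, si_errno, si_code; };
    struct mips_head    { int si_signo, si_code,  si_errno; };

    int main(void)
    {
        /* Same three fields, different offsets for code and errno. */
        printf("generic: errno@%zu code@%zu\n",
               offsetof(struct generic_head, si_errno),
               offsetof(struct generic_head, si_code));
        printf("mips:    errno@%zu code@%zu\n",
               offsetof(struct mips_head, si_errno),
               offsetof(struct mips_head, si_code));
        return 0;
    }
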
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index c454525e7695..9dd051edb411 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -140,10 +140,18 @@ static void qi_lb60_nand_ident(struct platform_device *pdev,
140 140
141static struct jz_nand_platform_data qi_lb60_nand_pdata = { 141static struct jz_nand_platform_data qi_lb60_nand_pdata = {
142 .ident_callback = qi_lb60_nand_ident, 142 .ident_callback = qi_lb60_nand_ident,
143 .busy_gpio = 94,
144 .banks = { 1 }, 143 .banks = { 1 },
145}; 144};
146 145
146static struct gpiod_lookup_table qi_lb60_nand_gpio_table = {
147 .dev_id = "jz4740-nand.0",
148 .table = {
149 GPIO_LOOKUP("Bank C", 30, "busy", 0),
150 { },
151 },
152};
153
154
147/* Keyboard*/ 155/* Keyboard*/
148 156
149#define KEY_QI_QI KEY_F13 157#define KEY_QI_QI KEY_F13
@@ -472,6 +480,7 @@ static int __init qi_lb60_init_platform_devices(void)
472 jz4740_mmc_device.dev.platform_data = &qi_lb60_mmc_pdata; 480 jz4740_mmc_device.dev.platform_data = &qi_lb60_mmc_pdata;
473 481
474 gpiod_add_lookup_table(&qi_lb60_audio_gpio_table); 482 gpiod_add_lookup_table(&qi_lb60_audio_gpio_table);
483 gpiod_add_lookup_table(&qi_lb60_nand_gpio_table);
475 484
476 jz4740_serial_device_register(); 485 jz4740_serial_device_register();
477 486
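
With the busy line described by a gpiod lookup table instead of a raw GPIO number in platform data, the NAND driver can request it by function name. A hypothetical consumer-side sketch modeled on the gpiod consumer API; example_probe() and its error handling are illustrative, not the actual jz4740-nand code:

    #include <linux/err.h>
    #include <linux/gpio/consumer.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
        struct gpio_desc *busy;

        /* Matched against GPIO_LOOKUP("Bank C", 30, "busy", 0) by con_id. */
        busy = devm_gpiod_get(&pdev->dev, "busy", GPIOD_IN);
        if (IS_ERR(busy))
            return PTR_ERR(busy);

        /* gpiod_get_value(busy) now reports the R/B# line state. */
        return 0;
    }
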
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 92987d1bbe5f..d3d2ff2d76dc 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
52obj-$(CONFIG_MIPS_CMP) += smp-cmp.o 52obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
53obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o 53obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o
54obj-$(CONFIG_MIPS_GIC_IPI) += smp-gic.o 54obj-$(CONFIG_MIPS_GIC_IPI) += smp-gic.o
55obj-$(CONFIG_CPU_MIPSR2) += spram.o 55obj-$(CONFIG_MIPS_SPRAM) += spram.o
56 56
57obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o 57obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
58obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o 58obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o
@@ -90,6 +90,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
90obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o 90obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o
91obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o 91obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o
92obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o 92obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
93obj-$(CONFIG_MIPSR2_TO_R6_EMULATOR) += mips-r2-to-r6-emul.o
93 94
94CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) 95CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
95 96
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 3b2dfdb4865f..750d67ac41e9 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -97,6 +97,7 @@ void output_thread_info_defines(void)
97 OFFSET(TI_TP_VALUE, thread_info, tp_value); 97 OFFSET(TI_TP_VALUE, thread_info, tp_value);
98 OFFSET(TI_CPU, thread_info, cpu); 98 OFFSET(TI_CPU, thread_info, cpu);
99 OFFSET(TI_PRE_COUNT, thread_info, preempt_count); 99 OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
100 OFFSET(TI_R2_EMUL_RET, thread_info, r2_emul_return);
100 OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); 101 OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
101 OFFSET(TI_REGS, thread_info, regs); 102 OFFSET(TI_REGS, thread_info, regs);
102 DEFINE(_THREAD_SIZE, THREAD_SIZE); 103 DEFINE(_THREAD_SIZE, THREAD_SIZE);
@@ -381,6 +382,7 @@ void output_octeon_cop2_state_defines(void)
381 OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result); 382 OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result);
382 OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw); 383 OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw);
383 OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw); 384 OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw);
385 OFFSET(OCTEON_CP2_SHA3, octeon_cop2_state, cop2_sha3);
384 OFFSET(THREAD_CP2, task_struct, thread.cp2); 386 OFFSET(THREAD_CP2, task_struct, thread.cp2);
385 OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg); 387 OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg);
386 BLANK(); 388 BLANK();
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 4d7d99d601cc..c2e0f45ddf6c 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -16,6 +16,7 @@
16#include <asm/fpu.h> 16#include <asm/fpu.h>
17#include <asm/fpu_emulator.h> 17#include <asm/fpu_emulator.h>
18#include <asm/inst.h> 18#include <asm/inst.h>
19#include <asm/mips-r2-to-r6-emul.h>
19#include <asm/ptrace.h> 20#include <asm/ptrace.h>
20#include <asm/uaccess.h> 21#include <asm/uaccess.h>
21 22
@@ -399,11 +400,21 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs)
399 * @returns: -EFAULT on error and forces SIGBUS, and on success 400 * @returns: -EFAULT on error and forces SIGBUS, and on success
400 * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after 401 * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
401 * evaluating the branch. 402 * evaluating the branch.
403 *
404 * MIPS R6 Compact branches and forbidden slots:
405 * Compact branches do not throw exceptions because they do
406 * not have delay slots. The forbidden slot instruction ($PC+4)
407 * is only executed if the branch was not taken. Otherwise the
 408 * forbidden slot is skipped entirely. This means that the
 409 * only way a MIPS R6 compact branch can bring us here is that
 410 * its forbidden slot instruction has raised an exception.
411 * In that case the branch was not taken, so the EPC can be safely
412 * set to EPC + 8.
402 */ 413 */
403int __compute_return_epc_for_insn(struct pt_regs *regs, 414int __compute_return_epc_for_insn(struct pt_regs *regs,
404 union mips_instruction insn) 415 union mips_instruction insn)
405{ 416{
406 unsigned int bit, fcr31, dspcontrol; 417 unsigned int bit, fcr31, dspcontrol, reg;
407 long epc = regs->cp0_epc; 418 long epc = regs->cp0_epc;
408 int ret = 0; 419 int ret = 0;
409 420
@@ -417,6 +428,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
417 regs->regs[insn.r_format.rd] = epc + 8; 428 regs->regs[insn.r_format.rd] = epc + 8;
418 /* Fall through */ 429 /* Fall through */
419 case jr_op: 430 case jr_op:
431 if (NO_R6EMU && insn.r_format.func == jr_op)
432 goto sigill_r6;
420 regs->cp0_epc = regs->regs[insn.r_format.rs]; 433 regs->cp0_epc = regs->regs[insn.r_format.rs];
421 break; 434 break;
422 } 435 }
@@ -429,8 +442,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
429 */ 442 */
430 case bcond_op: 443 case bcond_op:
431 switch (insn.i_format.rt) { 444 switch (insn.i_format.rt) {
432 case bltz_op:
433 case bltzl_op: 445 case bltzl_op:
446 if (NO_R6EMU)
447 goto sigill_r6;
448 case bltz_op:
434 if ((long)regs->regs[insn.i_format.rs] < 0) { 449 if ((long)regs->regs[insn.i_format.rs] < 0) {
435 epc = epc + 4 + (insn.i_format.simmediate << 2); 450 epc = epc + 4 + (insn.i_format.simmediate << 2);
436 if (insn.i_format.rt == bltzl_op) 451 if (insn.i_format.rt == bltzl_op)
@@ -440,8 +455,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
440 regs->cp0_epc = epc; 455 regs->cp0_epc = epc;
441 break; 456 break;
442 457
443 case bgez_op:
444 case bgezl_op: 458 case bgezl_op:
459 if (NO_R6EMU)
460 goto sigill_r6;
461 case bgez_op:
445 if ((long)regs->regs[insn.i_format.rs] >= 0) { 462 if ((long)regs->regs[insn.i_format.rs] >= 0) {
446 epc = epc + 4 + (insn.i_format.simmediate << 2); 463 epc = epc + 4 + (insn.i_format.simmediate << 2);
447 if (insn.i_format.rt == bgezl_op) 464 if (insn.i_format.rt == bgezl_op)
@@ -453,7 +470,29 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
453 470
454 case bltzal_op: 471 case bltzal_op:
455 case bltzall_op: 472 case bltzall_op:
473 if (NO_R6EMU && (insn.i_format.rs ||
474 insn.i_format.rt == bltzall_op)) {
475 ret = -SIGILL;
476 break;
477 }
456 regs->regs[31] = epc + 8; 478 regs->regs[31] = epc + 8;
479 /*
480 * OK we are here either because we hit a NAL
481 * instruction or because we are emulating an
 482 * old bltzal{,l} one. Let's figure out what the
483 * case really is.
484 */
485 if (!insn.i_format.rs) {
486 /*
487 * NAL or BLTZAL with rs == 0
488 * Doesn't matter if we are R6 or not. The
489 * result is the same
490 */
491 regs->cp0_epc += 4 +
492 (insn.i_format.simmediate << 2);
493 break;
494 }
495 /* Now do the real thing for non-R6 BLTZAL{,L} */
457 if ((long)regs->regs[insn.i_format.rs] < 0) { 496 if ((long)regs->regs[insn.i_format.rs] < 0) {
458 epc = epc + 4 + (insn.i_format.simmediate << 2); 497 epc = epc + 4 + (insn.i_format.simmediate << 2);
459 if (insn.i_format.rt == bltzall_op) 498 if (insn.i_format.rt == bltzall_op)
@@ -465,7 +504,29 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
465 504
466 case bgezal_op: 505 case bgezal_op:
467 case bgezall_op: 506 case bgezall_op:
507 if (NO_R6EMU && (insn.i_format.rs ||
508 insn.i_format.rt == bgezall_op)) {
509 ret = -SIGILL;
510 break;
511 }
468 regs->regs[31] = epc + 8; 512 regs->regs[31] = epc + 8;
513 /*
514 * OK we are here either because we hit a BAL
515 * instruction or because we are emulating an
 516 * old bgezal{,l} one. Let's figure out what the
517 * case really is.
518 */
519 if (!insn.i_format.rs) {
520 /*
521 * BAL or BGEZAL with rs == 0
522 * Doesn't matter if we are R6 or not. The
523 * result is the same
524 */
525 regs->cp0_epc += 4 +
526 (insn.i_format.simmediate << 2);
527 break;
528 }
529 /* Now do the real thing for non-R6 BGEZAL{,L} */
469 if ((long)regs->regs[insn.i_format.rs] >= 0) { 530 if ((long)regs->regs[insn.i_format.rs] >= 0) {
470 epc = epc + 4 + (insn.i_format.simmediate << 2); 531 epc = epc + 4 + (insn.i_format.simmediate << 2);
471 if (insn.i_format.rt == bgezall_op) 532 if (insn.i_format.rt == bgezall_op)
@@ -477,7 +538,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
477 538
478 case bposge32_op: 539 case bposge32_op:
479 if (!cpu_has_dsp) 540 if (!cpu_has_dsp)
480 goto sigill; 541 goto sigill_dsp;
481 542
482 dspcontrol = rddsp(0x01); 543 dspcontrol = rddsp(0x01);
483 544
@@ -508,8 +569,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
508 /* 569 /*
509 * These are conditional and in i_format. 570 * These are conditional and in i_format.
510 */ 571 */
511 case beq_op:
512 case beql_op: 572 case beql_op:
573 if (NO_R6EMU)
574 goto sigill_r6;
575 case beq_op:
513 if (regs->regs[insn.i_format.rs] == 576 if (regs->regs[insn.i_format.rs] ==
514 regs->regs[insn.i_format.rt]) { 577 regs->regs[insn.i_format.rt]) {
515 epc = epc + 4 + (insn.i_format.simmediate << 2); 578 epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -520,8 +583,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
520 regs->cp0_epc = epc; 583 regs->cp0_epc = epc;
521 break; 584 break;
522 585
523 case bne_op:
524 case bnel_op: 586 case bnel_op:
587 if (NO_R6EMU)
588 goto sigill_r6;
589 case bne_op:
525 if (regs->regs[insn.i_format.rs] != 590 if (regs->regs[insn.i_format.rs] !=
526 regs->regs[insn.i_format.rt]) { 591 regs->regs[insn.i_format.rt]) {
527 epc = epc + 4 + (insn.i_format.simmediate << 2); 592 epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -532,8 +597,31 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
532 regs->cp0_epc = epc; 597 regs->cp0_epc = epc;
533 break; 598 break;
534 599
535 case blez_op: /* not really i_format */ 600 case blezl_op: /* not really i_format */
536 case blezl_op: 601 if (NO_R6EMU)
602 goto sigill_r6;
603 case blez_op:
604 /*
 605 * R6 compact branches encoded with the
606 * blez and blezl opcodes.
607 * BLEZ | rs = 0 | rt != 0 == BLEZALC
608 * BLEZ | rs = rt != 0 == BGEZALC
609 * BLEZ | rs != 0 | rt != 0 == BGEUC
610 * BLEZL | rs = 0 | rt != 0 == BLEZC
611 * BLEZL | rs = rt != 0 == BGEZC
612 * BLEZL | rs != 0 | rt != 0 == BGEC
613 *
614 * For real BLEZ{,L}, rt is always 0.
615 */
616
617 if (cpu_has_mips_r6 && insn.i_format.rt) {
618 if ((insn.i_format.opcode == blez_op) &&
619 ((!insn.i_format.rs && insn.i_format.rt) ||
620 (insn.i_format.rs == insn.i_format.rt)))
621 regs->regs[31] = epc + 4;
622 regs->cp0_epc += 8;
623 break;
624 }
537 /* rt field assumed to be zero */ 625 /* rt field assumed to be zero */
538 if ((long)regs->regs[insn.i_format.rs] <= 0) { 626 if ((long)regs->regs[insn.i_format.rs] <= 0) {
539 epc = epc + 4 + (insn.i_format.simmediate << 2); 627 epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -544,8 +632,32 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
544 regs->cp0_epc = epc; 632 regs->cp0_epc = epc;
545 break; 633 break;
546 634
547 case bgtz_op:
548 case bgtzl_op: 635 case bgtzl_op:
636 if (NO_R6EMU)
637 goto sigill_r6;
638 case bgtz_op:
639 /*
 640 * R6 compact branches encoded with the
641 * bgtz and bgtzl opcodes.
642 * BGTZ | rs = 0 | rt != 0 == BGTZALC
643 * BGTZ | rs = rt != 0 == BLTZALC
644 * BGTZ | rs != 0 | rt != 0 == BLTUC
645 * BGTZL | rs = 0 | rt != 0 == BGTZC
646 * BGTZL | rs = rt != 0 == BLTZC
647 * BGTZL | rs != 0 | rt != 0 == BLTC
648 *
 649 * The *ZALC variants apply for BGTZ when rt != 0.
 650 * For real BGTZ{,L}, rt is always 0.
651 */
652 if (cpu_has_mips_r6 && insn.i_format.rt) {
 653 if ((insn.i_format.opcode == bgtz_op) &&
654 ((!insn.i_format.rs && insn.i_format.rt) ||
655 (insn.i_format.rs == insn.i_format.rt)))
656 regs->regs[31] = epc + 4;
657 regs->cp0_epc += 8;
658 break;
659 }
660
549 /* rt field assumed to be zero */ 661 /* rt field assumed to be zero */
550 if ((long)regs->regs[insn.i_format.rs] > 0) { 662 if ((long)regs->regs[insn.i_format.rs] > 0) {
551 epc = epc + 4 + (insn.i_format.simmediate << 2); 663 epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -560,40 +672,83 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
560 * And now the FPA/cp1 branch instructions. 672 * And now the FPA/cp1 branch instructions.
561 */ 673 */
562 case cop1_op: 674 case cop1_op:
563 preempt_disable(); 675 if (cpu_has_mips_r6 &&
564 if (is_fpu_owner()) 676 ((insn.i_format.rs == bc1eqz_op) ||
565 fcr31 = read_32bit_cp1_register(CP1_STATUS); 677 (insn.i_format.rs == bc1nez_op))) {
566 else 678 if (!used_math()) { /* First time FPU user */
567 fcr31 = current->thread.fpu.fcr31; 679 ret = init_fpu();
568 preempt_enable(); 680 if (ret && NO_R6EMU) {
569 681 ret = -ret;
570 bit = (insn.i_format.rt >> 2); 682 break;
571 bit += (bit != 0); 683 }
572 bit += 23; 684 ret = 0;
573 switch (insn.i_format.rt & 3) { 685 set_used_math();
574 case 0: /* bc1f */ 686 }
575 case 2: /* bc1fl */ 687 lose_fpu(1); /* Save FPU state for the emulator. */
576 if (~fcr31 & (1 << bit)) { 688 reg = insn.i_format.rt;
577 epc = epc + 4 + (insn.i_format.simmediate << 2); 689 bit = 0;
578 if (insn.i_format.rt == 2) 690 switch (insn.i_format.rs) {
579 ret = BRANCH_LIKELY_TAKEN; 691 case bc1eqz_op:
580 } else 692 /* Test bit 0 */
693 if (get_fpr32(&current->thread.fpu.fpr[reg], 0)
694 & 0x1)
695 bit = 1;
696 break;
697 case bc1nez_op:
698 /* Test bit 0 */
699 if (!(get_fpr32(&current->thread.fpu.fpr[reg], 0)
700 & 0x1))
701 bit = 1;
702 break;
703 }
704 own_fpu(1);
705 if (bit)
706 epc = epc + 4 +
707 (insn.i_format.simmediate << 2);
708 else
581 epc += 8; 709 epc += 8;
582 regs->cp0_epc = epc; 710 regs->cp0_epc = epc;
711
583 break; 712 break;
713 } else {
584 714
585 case 1: /* bc1t */ 715 preempt_disable();
586 case 3: /* bc1tl */ 716 if (is_fpu_owner())
587 if (fcr31 & (1 << bit)) { 717 fcr31 = read_32bit_cp1_register(CP1_STATUS);
588 epc = epc + 4 + (insn.i_format.simmediate << 2); 718 else
589 if (insn.i_format.rt == 3) 719 fcr31 = current->thread.fpu.fcr31;
590 ret = BRANCH_LIKELY_TAKEN; 720 preempt_enable();
591 } else 721
592 epc += 8; 722 bit = (insn.i_format.rt >> 2);
593 regs->cp0_epc = epc; 723 bit += (bit != 0);
724 bit += 23;
725 switch (insn.i_format.rt & 3) {
726 case 0: /* bc1f */
727 case 2: /* bc1fl */
728 if (~fcr31 & (1 << bit)) {
729 epc = epc + 4 +
730 (insn.i_format.simmediate << 2);
731 if (insn.i_format.rt == 2)
732 ret = BRANCH_LIKELY_TAKEN;
733 } else
734 epc += 8;
735 regs->cp0_epc = epc;
736 break;
737
738 case 1: /* bc1t */
739 case 3: /* bc1tl */
740 if (fcr31 & (1 << bit)) {
741 epc = epc + 4 +
742 (insn.i_format.simmediate << 2);
743 if (insn.i_format.rt == 3)
744 ret = BRANCH_LIKELY_TAKEN;
745 } else
746 epc += 8;
747 regs->cp0_epc = epc;
748 break;
749 }
594 break; 750 break;
595 } 751 }
596 break;
597#ifdef CONFIG_CPU_CAVIUM_OCTEON 752#ifdef CONFIG_CPU_CAVIUM_OCTEON
598 case lwc2_op: /* This is bbit0 on Octeon */ 753 case lwc2_op: /* This is bbit0 on Octeon */
599 if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) 754 if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
@@ -626,15 +781,72 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
626 epc += 8; 781 epc += 8;
627 regs->cp0_epc = epc; 782 regs->cp0_epc = epc;
628 break; 783 break;
784#else
785 case bc6_op:
786 /* Only valid for MIPS R6 */
787 if (!cpu_has_mips_r6) {
788 ret = -SIGILL;
789 break;
790 }
791 regs->cp0_epc += 8;
792 break;
793 case balc6_op:
794 if (!cpu_has_mips_r6) {
795 ret = -SIGILL;
796 break;
797 }
798 /* Compact branch: BALC */
799 regs->regs[31] = epc + 4;
800 epc += 4 + (insn.i_format.simmediate << 2);
801 regs->cp0_epc = epc;
802 break;
803 case beqzcjic_op:
804 if (!cpu_has_mips_r6) {
805 ret = -SIGILL;
806 break;
807 }
808 /* Compact branch: BEQZC || JIC */
809 regs->cp0_epc += 8;
810 break;
811 case bnezcjialc_op:
812 if (!cpu_has_mips_r6) {
813 ret = -SIGILL;
814 break;
815 }
816 /* Compact branch: BNEZC || JIALC */
817 if (insn.i_format.rs)
818 regs->regs[31] = epc + 4;
819 regs->cp0_epc += 8;
820 break;
629#endif 821#endif
822 case cbcond0_op:
823 case cbcond1_op:
824 /* Only valid for MIPS R6 */
825 if (!cpu_has_mips_r6) {
826 ret = -SIGILL;
827 break;
828 }
829 /*
830 * Compact branches:
 831 * bovc, beqc, beqzalc, bnvc, bnec, bnezalc
832 */
833 if (insn.i_format.rt && !insn.i_format.rs)
834 regs->regs[31] = epc + 4;
835 regs->cp0_epc += 8;
836 break;
630 } 837 }
631 838
632 return ret; 839 return ret;
633 840
634sigill: 841sigill_dsp:
635 printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm); 842 printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
636 force_sig(SIGBUS, current); 843 force_sig(SIGBUS, current);
637 return -EFAULT; 844 return -EFAULT;
845sigill_r6:
 846 pr_info("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n",
847 current->comm);
848 force_sig(SIGILL, current);
849 return -EFAULT;
638} 850}
639EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn); 851EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
640 852
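
The forbidden-slot rule implemented above boils down to one invariant: if the instruction at $PC+4 of a compact branch faulted, the branch was not taken, so the kernel can resume at EPC + 8 without evaluating the condition. A stand-alone model of that arithmetic (advance_epc() is a made-up helper, not a kernel function):

    #include <assert.h>

    static unsigned long advance_epc(unsigned long epc, int branch_taken,
                                     long simmediate)
    {
        if (branch_taken)
            return epc + 4 + (simmediate << 2);  /* classic branch target */
        return epc + 8;                          /* skip the forbidden slot */
    }

    int main(void)
    {
        /* A fault in the forbidden slot implies the branch fell through. */
        assert(advance_epc(0x1000, 0, 0) == 0x1008);
        /* A taken conditional branch targets EPC + 4 + (offset << 2). */
        assert(advance_epc(0x1000, 1, 4) == 0x1014);
        return 0;
    }
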
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 6acaad0480af..82bd2b278a24 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -11,7 +11,6 @@
11#include <linux/percpu.h> 11#include <linux/percpu.h>
12#include <linux/smp.h> 12#include <linux/smp.h>
13#include <linux/irq.h> 13#include <linux/irq.h>
14#include <linux/irqchip/mips-gic.h>
15 14
16#include <asm/time.h> 15#include <asm/time.h>
17#include <asm/cevt-r4k.h> 16#include <asm/cevt-r4k.h>
@@ -40,7 +39,7 @@ int cp0_timer_irq_installed;
40 39
41irqreturn_t c0_compare_interrupt(int irq, void *dev_id) 40irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
42{ 41{
43 const int r2 = cpu_has_mips_r2; 42 const int r2 = cpu_has_mips_r2_r6;
44 struct clock_event_device *cd; 43 struct clock_event_device *cd;
45 int cpu = smp_processor_id(); 44 int cpu = smp_processor_id();
46 45
@@ -85,10 +84,7 @@ void mips_event_handler(struct clock_event_device *dev)
85 */ 84 */
86static int c0_compare_int_pending(void) 85static int c0_compare_int_pending(void)
87{ 86{
88#ifdef CONFIG_MIPS_GIC 87 /* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */
89 if (gic_present)
90 return gic_get_timer_pending();
91#endif
92 return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP); 88 return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
93} 89}
94 90
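
Dropping the GIC query works because, from R2 onwards, the shifted Cause read above lands on Cause.TI when cp0_compare_irq_shift points at the timer interrupt, and the same expression still covers legacy IP7. A stand-alone model; CAUSEB_IP is 8 in the MIPS headers, and a shift of 7 (the legacy compare IRQ) is assumed for the demo:

    #include <stdio.h>

    #define CAUSEB_IP 8    /* first interrupt-pending bit in Cause */

    static int compare_int_pending(unsigned int cause, int irq_shift)
    {
        return (cause >> irq_shift) & (1u << CAUSEB_IP);
    }

    int main(void)
    {
        unsigned int cause = 1u << 15;   /* pretend IP7/TI is raised */
        printf("pending=%d\n", compare_int_pending(cause, 7) != 0);
        return 0;
    }
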
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 0384b05ab5a0..55b759a0019e 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -99,11 +99,11 @@ not_nmi:
99 xori t2, t1, 0x7 99 xori t2, t1, 0x7
100 beqz t2, 1f 100 beqz t2, 1f
101 li t3, 32 101 li t3, 32
102 addi t1, t1, 1 102 addiu t1, t1, 1
103 sllv t1, t3, t1 103 sllv t1, t3, t1
1041: /* At this point t1 == I-cache sets per way */ 1041: /* At this point t1 == I-cache sets per way */
105 _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ 105 _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
106 addi t2, t2, 1 106 addiu t2, t2, 1
107 mul t1, t1, t0 107 mul t1, t1, t0
108 mul t1, t1, t2 108 mul t1, t1, t2
109 109
@@ -126,11 +126,11 @@ icache_done:
126 xori t2, t1, 0x7 126 xori t2, t1, 0x7
127 beqz t2, 1f 127 beqz t2, 1f
128 li t3, 32 128 li t3, 32
129 addi t1, t1, 1 129 addiu t1, t1, 1
130 sllv t1, t3, t1 130 sllv t1, t3, t1
1311: /* At this point t1 == D-cache sets per way */ 1311: /* At this point t1 == D-cache sets per way */
132 _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ 132 _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
133 addi t2, t2, 1 133 addiu t2, t2, 1
134 mul t1, t1, t0 134 mul t1, t1, t0
135 mul t1, t1, t2 135 mul t1, t1, t2
136 136
@@ -250,7 +250,7 @@ LEAF(mips_cps_core_init)
250 mfc0 t0, CP0_MVPCONF0 250 mfc0 t0, CP0_MVPCONF0
251 srl t0, t0, MVPCONF0_PVPE_SHIFT 251 srl t0, t0, MVPCONF0_PVPE_SHIFT
252 andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) 252 andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
253 addi t7, t0, 1 253 addiu t7, t0, 1
254 254
255 /* If there's only 1, we're done */ 255 /* If there's only 1, we're done */
256 beqz t0, 2f 256 beqz t0, 2f
@@ -280,7 +280,7 @@ LEAF(mips_cps_core_init)
280 mttc0 t0, CP0_TCHALT 280 mttc0 t0, CP0_TCHALT
281 281
282 /* Next VPE */ 282 /* Next VPE */
283 addi t5, t5, 1 283 addiu t5, t5, 1
284 slt t0, t5, t7 284 slt t0, t5, t7
285 bnez t0, 1b 285 bnez t0, 1b
286 nop 286 nop
@@ -317,7 +317,7 @@ LEAF(mips_cps_boot_vpes)
317 mfc0 t1, CP0_MVPCONF0 317 mfc0 t1, CP0_MVPCONF0
318 srl t1, t1, MVPCONF0_PVPE_SHIFT 318 srl t1, t1, MVPCONF0_PVPE_SHIFT
319 andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT 319 andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
320 addi t1, t1, 1 320 addiu t1, t1, 1
321 321
322 /* Calculate a mask for the VPE ID from EBase.CPUNum */ 322 /* Calculate a mask for the VPE ID from EBase.CPUNum */
323 clz t1, t1 323 clz t1, t1
@@ -424,7 +424,7 @@ LEAF(mips_cps_boot_vpes)
424 424
425 /* Next VPE */ 425 /* Next VPE */
4262: srl t6, t6, 1 4262: srl t6, t6, 1
427 addi t5, t5, 1 427 addiu t5, t5, 1
428 bnez t6, 1b 428 bnez t6, 1b
429 nop 429 nop
430 430
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index 2d80b5f1aeae..09f4034f239f 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -244,7 +244,7 @@ static inline void check_daddi(void)
244 panic(bug64hit, !DADDI_WAR ? daddiwar : nowar); 244 panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
245} 245}
246 246
247int daddiu_bug = -1; 247int daddiu_bug = config_enabled(CONFIG_CPU_MIPSR6) ? 0 : -1;
248 248
249static inline void check_daddiu(void) 249static inline void check_daddiu(void)
250{ 250{
@@ -314,11 +314,14 @@ static inline void check_daddiu(void)
314 314
315void __init check_bugs64_early(void) 315void __init check_bugs64_early(void)
316{ 316{
317 check_mult_sh(); 317 if (!config_enabled(CONFIG_CPU_MIPSR6)) {
318 check_daddiu(); 318 check_mult_sh();
319 check_daddiu();
320 }
319} 321}
320 322
321void __init check_bugs64(void) 323void __init check_bugs64(void)
322{ 324{
323 check_daddi(); 325 if (!config_enabled(CONFIG_CPU_MIPSR6))
326 check_daddi();
324} 327}
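
config_enabled() evaluates to a compile-time 0 or 1, so guarding the bug checks this way lets the compiler drop them entirely on R6 kernels while keeping the code visible and type-checked. A trivial model of the pattern; IS_R6 stands in for config_enabled(CONFIG_CPU_MIPSR6):

    #include <stdio.h>

    #define IS_R6 0    /* stand-in for config_enabled(CONFIG_CPU_MIPSR6) */

    static void check_daddiu(void) { puts("running daddiu bug check"); }

    int main(void)
    {
        if (!IS_R6)    /* dead-code eliminated when IS_R6 is 1 */
            check_daddiu();
        return 0;
    }
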
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 5342674842f5..48dfb9de853d 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -237,6 +237,13 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
237 c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III; 237 c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III;
238 break; 238 break;
239 239
240 /* R6 incompatible with everything else */
241 case MIPS_CPU_ISA_M64R6:
242 c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6;
243 case MIPS_CPU_ISA_M32R6:
244 c->isa_level |= MIPS_CPU_ISA_M32R6;
245 /* Break here so we don't add incompatible ISAs */
246 break;
240 case MIPS_CPU_ISA_M32R2: 247 case MIPS_CPU_ISA_M32R2:
241 c->isa_level |= MIPS_CPU_ISA_M32R2; 248 c->isa_level |= MIPS_CPU_ISA_M32R2;
242 case MIPS_CPU_ISA_M32R1: 249 case MIPS_CPU_ISA_M32R1:
@@ -326,6 +333,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
326 case 1: 333 case 1:
327 set_isa(c, MIPS_CPU_ISA_M32R2); 334 set_isa(c, MIPS_CPU_ISA_M32R2);
328 break; 335 break;
336 case 2:
337 set_isa(c, MIPS_CPU_ISA_M32R6);
338 break;
329 default: 339 default:
330 goto unknown; 340 goto unknown;
331 } 341 }
@@ -338,6 +348,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
338 case 1: 348 case 1:
339 set_isa(c, MIPS_CPU_ISA_M64R2); 349 set_isa(c, MIPS_CPU_ISA_M64R2);
340 break; 350 break;
351 case 2:
352 set_isa(c, MIPS_CPU_ISA_M64R6);
353 break;
341 default: 354 default:
342 goto unknown; 355 goto unknown;
343 } 356 }
@@ -424,8 +437,10 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
424 if (config3 & MIPS_CONF3_MSA) 437 if (config3 & MIPS_CONF3_MSA)
425 c->ases |= MIPS_ASE_MSA; 438 c->ases |= MIPS_ASE_MSA;
426 /* Only tested on 32-bit cores */ 439 /* Only tested on 32-bit cores */
427 if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) 440 if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) {
441 c->htw_seq = 0;
428 c->options |= MIPS_CPU_HTW; 442 c->options |= MIPS_CPU_HTW;
443 }
429 444
430 return config3 & MIPS_CONF_M; 445 return config3 & MIPS_CONF_M;
431} 446}
@@ -499,6 +514,8 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
499 c->options |= MIPS_CPU_EVA; 514 c->options |= MIPS_CPU_EVA;
500 if (config5 & MIPS_CONF5_MRP) 515 if (config5 & MIPS_CONF5_MRP)
501 c->options |= MIPS_CPU_MAAR; 516 c->options |= MIPS_CPU_MAAR;
517 if (config5 & MIPS_CONF5_LLB)
518 c->options |= MIPS_CPU_RW_LLB;
502 519
503 return config5 & MIPS_CONF_M; 520 return config5 & MIPS_CONF_M;
504} 521}
@@ -533,7 +550,7 @@ static void decode_configs(struct cpuinfo_mips *c)
533 550
534 if (cpu_has_rixi) { 551 if (cpu_has_rixi) {
535 /* Enable the RIXI exceptions */ 552 /* Enable the RIXI exceptions */
536 write_c0_pagegrain(read_c0_pagegrain() | PG_IEC); 553 set_c0_pagegrain(PG_IEC);
537 back_to_back_c0_hazard(); 554 back_to_back_c0_hazard();
538 /* Verify the IEC bit is set */ 555 /* Verify the IEC bit is set */
539 if (read_c0_pagegrain() & PG_IEC) 556 if (read_c0_pagegrain() & PG_IEC)
@@ -541,7 +558,7 @@ static void decode_configs(struct cpuinfo_mips *c)
541 } 558 }
542 559
543#ifndef CONFIG_MIPS_CPS 560#ifndef CONFIG_MIPS_CPS
544 if (cpu_has_mips_r2) { 561 if (cpu_has_mips_r2_r6) {
545 c->core = get_ebase_cpunum(); 562 c->core = get_ebase_cpunum();
546 if (cpu_has_mipsmt) 563 if (cpu_has_mipsmt)
547 c->core >>= fls(core_nvpes()) - 1; 564 c->core >>= fls(core_nvpes()) - 1;
@@ -896,6 +913,11 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
896{ 913{
897 c->writecombine = _CACHE_UNCACHED_ACCELERATED; 914 c->writecombine = _CACHE_UNCACHED_ACCELERATED;
898 switch (c->processor_id & PRID_IMP_MASK) { 915 switch (c->processor_id & PRID_IMP_MASK) {
916 case PRID_IMP_QEMU_GENERIC:
917 c->writecombine = _CACHE_UNCACHED;
918 c->cputype = CPU_QEMU_GENERIC;
919 __cpu_name[cpu] = "MIPS GENERIC QEMU";
920 break;
899 case PRID_IMP_4KC: 921 case PRID_IMP_4KC:
900 c->cputype = CPU_4KC; 922 c->cputype = CPU_4KC;
901 c->writecombine = _CACHE_UNCACHED; 923 c->writecombine = _CACHE_UNCACHED;
@@ -1345,8 +1367,7 @@ void cpu_probe(void)
1345 if (c->options & MIPS_CPU_FPU) { 1367 if (c->options & MIPS_CPU_FPU) {
1346 c->fpu_id = cpu_get_fpu_id(); 1368 c->fpu_id = cpu_get_fpu_id();
1347 1369
1348 if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | 1370 if (c->isa_level & cpu_has_mips_r) {
1349 MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
1350 if (c->fpu_id & MIPS_FPIR_3D) 1371 if (c->fpu_id & MIPS_FPIR_3D)
1351 c->ases |= MIPS_ASE_MIPS3D; 1372 c->ases |= MIPS_ASE_MIPS3D;
1352 if (c->fpu_id & MIPS_FPIR_FREP) 1373 if (c->fpu_id & MIPS_FPIR_FREP)
@@ -1354,7 +1375,7 @@ void cpu_probe(void)
1354 } 1375 }
1355 } 1376 }
1356 1377
1357 if (cpu_has_mips_r2) { 1378 if (cpu_has_mips_r2_r6) {
1358 c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1; 1379 c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
1359 /* R2 has Performance Counter Interrupt indicator */ 1380 /* R2 has Performance Counter Interrupt indicator */
1360 c->options |= MIPS_CPU_PCI; 1381 c->options |= MIPS_CPU_PCI;
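
set_isa() relies on case fall-through so that each ISA level also records every level it is backward compatible with; the new R6 cases break out early because R6 is not compatible with R2 and below. A sketch with made-up flag values (the real MIPS_CPU_ISA_* masks live in asm/cpu.h):

    #include <stdio.h>

    #define ISA_M32R1 0x01
    #define ISA_M32R2 0x02
    #define ISA_M32R6 0x04
    #define ISA_M64R6 0x08

    static unsigned int set_isa(unsigned int isa)
    {
        unsigned int level = 0;

        switch (isa) {
        case ISA_M64R6:
            level |= ISA_M64R6;
            /* fall through */
        case ISA_M32R6:
            level |= ISA_M32R6;
            break;          /* R6 is incompatible with everything older */
        case ISA_M32R2:
            level |= ISA_M32R2;
            /* fall through */
        case ISA_M32R1:
            level |= ISA_M32R1;
            break;
        }
        return level;
    }

    int main(void)
    {
        printf("M64R6 -> %#x\n", set_isa(ISA_M64R6)); /* 0x0c, no R2/R1 bits */
        printf("M32R2 -> %#x\n", set_isa(ISA_M32R2)); /* 0x03 */
        return 0;
    }
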
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index a5b5b56485c1..d2c09f6475c5 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -11,29 +11,112 @@
11#include <linux/elf.h> 11#include <linux/elf.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
13 13
14/* FPU modes */
14enum { 15enum {
15 FP_ERROR = -1, 16 FP_FRE,
16 FP_DOUBLE_64A = -2, 17 FP_FR0,
18 FP_FR1,
17}; 19};
18 20
21/**
22 * struct mode_req - ABI FPU mode requirements
23 * @single: The program being loaded needs an FPU but it will only issue
24 * single precision instructions meaning that it can execute in
25 * either FR0 or FR1.
26 * @soft: The soft(-float) requirement means that the program being
 27 * loaded has no FPU dependency at all (i.e. it has no
28 * FPU instructions).
29 * @fr1: The program being loaded depends on FPU being in FR=1 mode.
30 * @frdefault: The program being loaded depends on the default FPU mode.
31 * That is FR0 for O32 and FR1 for N32/N64.
32 * @fre: The program being loaded depends on FPU with FRE=1. This mode is
33 * a bridge which uses FR=1 whilst still being able to maintain
34 * full compatibility with pre-existing code using the O32 FP32
35 * ABI.
36 *
37 * More information about the FP ABIs can be found here:
38 *
39 * https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking#10.4.1._Basic_mode_set-up
40 *
41 */
42
43struct mode_req {
44 bool single;
45 bool soft;
46 bool fr1;
47 bool frdefault;
48 bool fre;
49};
50
51static const struct mode_req fpu_reqs[] = {
52 [MIPS_ABI_FP_ANY] = { true, true, true, true, true },
53 [MIPS_ABI_FP_DOUBLE] = { false, false, false, true, true },
54 [MIPS_ABI_FP_SINGLE] = { true, false, false, false, false },
55 [MIPS_ABI_FP_SOFT] = { false, true, false, false, false },
56 [MIPS_ABI_FP_OLD_64] = { false, false, false, false, false },
57 [MIPS_ABI_FP_XX] = { false, false, true, true, true },
58 [MIPS_ABI_FP_64] = { false, false, true, false, false },
59 [MIPS_ABI_FP_64A] = { false, false, true, false, true }
60};
61
62/*
63 * Mode requirements when .MIPS.abiflags is not present in the ELF.
64 * Not present means that everything is acceptable except FR1.
65 */
66static struct mode_req none_req = { true, true, false, true, true };
67
19int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, 68int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
20 bool is_interp, struct arch_elf_state *state) 69 bool is_interp, struct arch_elf_state *state)
21{ 70{
22 struct elf32_hdr *ehdr = _ehdr; 71 struct elf32_hdr *ehdr32 = _ehdr;
23 struct elf32_phdr *phdr = _phdr; 72 struct elf32_phdr *phdr32 = _phdr;
73 struct elf64_phdr *phdr64 = _phdr;
24 struct mips_elf_abiflags_v0 abiflags; 74 struct mips_elf_abiflags_v0 abiflags;
25 int ret; 75 int ret;
26 76
 27 if (config_enabled(CONFIG_64BIT) && 77 /* Let's see if this is a 32-bit ELF */
28 (ehdr->e_ident[EI_CLASS] != ELFCLASS32)) 78 if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) {
29 return 0; 79 /* FR = 1 for N32 */
30 if (phdr->p_type != PT_MIPS_ABIFLAGS) 80 if (ehdr32->e_flags & EF_MIPS_ABI2)
31 return 0; 81 state->overall_fp_mode = FP_FR1;
32 if (phdr->p_filesz < sizeof(abiflags)) 82 else
33 return -EINVAL; 83 /* Set a good default FPU mode for O32 */
84 state->overall_fp_mode = cpu_has_mips_r6 ?
85 FP_FRE : FP_FR0;
86
87 if (ehdr32->e_flags & EF_MIPS_FP64) {
88 /*
89 * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it
90 * later if needed
91 */
92 if (is_interp)
93 state->interp_fp_abi = MIPS_ABI_FP_OLD_64;
94 else
95 state->fp_abi = MIPS_ABI_FP_OLD_64;
96 }
97 if (phdr32->p_type != PT_MIPS_ABIFLAGS)
98 return 0;
99
100 if (phdr32->p_filesz < sizeof(abiflags))
101 return -EINVAL;
102
103 ret = kernel_read(elf, phdr32->p_offset,
104 (char *)&abiflags,
105 sizeof(abiflags));
106 } else {
107 /* FR=1 is really the only option for 64-bit */
108 state->overall_fp_mode = FP_FR1;
109
110 if (phdr64->p_type != PT_MIPS_ABIFLAGS)
111 return 0;
112 if (phdr64->p_filesz < sizeof(abiflags))
113 return -EINVAL;
114
115 ret = kernel_read(elf, phdr64->p_offset,
116 (char *)&abiflags,
117 sizeof(abiflags));
118 }
34 119
35 ret = kernel_read(elf, phdr->p_offset, (char *)&abiflags,
36 sizeof(abiflags));
37 if (ret < 0) 120 if (ret < 0)
38 return ret; 121 return ret;
39 if (ret != sizeof(abiflags)) 122 if (ret != sizeof(abiflags))
@@ -48,35 +131,30 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
48 return 0; 131 return 0;
49} 132}
50 133
51static inline unsigned get_fp_abi(struct elf32_hdr *ehdr, int in_abi) 134static inline unsigned get_fp_abi(int in_abi)
52{ 135{
53 /* If the ABI requirement is provided, simply return that */ 136 /* If the ABI requirement is provided, simply return that */
54 if (in_abi != -1) 137 if (in_abi != MIPS_ABI_FP_UNKNOWN)
55 return in_abi; 138 return in_abi;
56 139
57 /* If the EF_MIPS_FP64 flag was set, return MIPS_ABI_FP_64 */ 140 /* Unknown ABI */
58 if (ehdr->e_flags & EF_MIPS_FP64) 141 return MIPS_ABI_FP_UNKNOWN;
59 return MIPS_ABI_FP_64;
60
61 /* Default to MIPS_ABI_FP_DOUBLE */
62 return MIPS_ABI_FP_DOUBLE;
63} 142}
64 143
65int arch_check_elf(void *_ehdr, bool has_interpreter, 144int arch_check_elf(void *_ehdr, bool has_interpreter,
66 struct arch_elf_state *state) 145 struct arch_elf_state *state)
67{ 146{
68 struct elf32_hdr *ehdr = _ehdr; 147 struct elf32_hdr *ehdr = _ehdr;
69 unsigned fp_abi, interp_fp_abi, abi0, abi1; 148 struct mode_req prog_req, interp_req;
149 int fp_abi, interp_fp_abi, abi0, abi1, max_abi;
70 150
71 /* Ignore non-O32 binaries */ 151 if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
72 if (config_enabled(CONFIG_64BIT) &&
73 (ehdr->e_ident[EI_CLASS] != ELFCLASS32))
74 return 0; 152 return 0;
75 153
76 fp_abi = get_fp_abi(ehdr, state->fp_abi); 154 fp_abi = get_fp_abi(state->fp_abi);
77 155
78 if (has_interpreter) { 156 if (has_interpreter) {
79 interp_fp_abi = get_fp_abi(ehdr, state->interp_fp_abi); 157 interp_fp_abi = get_fp_abi(state->interp_fp_abi);
80 158
81 abi0 = min(fp_abi, interp_fp_abi); 159 abi0 = min(fp_abi, interp_fp_abi);
82 abi1 = max(fp_abi, interp_fp_abi); 160 abi1 = max(fp_abi, interp_fp_abi);
@@ -84,108 +162,103 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
84 abi0 = abi1 = fp_abi; 162 abi0 = abi1 = fp_abi;
85 } 163 }
86 164
87 state->overall_abi = FP_ERROR; 165 /* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */
88 166 max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) &&
89 if (abi0 == abi1) { 167 (!(ehdr->e_flags & EF_MIPS_ABI2))) ?
90 state->overall_abi = abi0; 168 MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT;
91 } else if (abi0 == MIPS_ABI_FP_ANY) {
92 state->overall_abi = abi1;
93 } else if (abi0 == MIPS_ABI_FP_DOUBLE) {
94 switch (abi1) {
95 case MIPS_ABI_FP_XX:
96 state->overall_abi = MIPS_ABI_FP_DOUBLE;
97 break;
98
99 case MIPS_ABI_FP_64A:
100 state->overall_abi = FP_DOUBLE_64A;
101 break;
102 }
103 } else if (abi0 == MIPS_ABI_FP_SINGLE ||
104 abi0 == MIPS_ABI_FP_SOFT) {
105 /* Cannot link with other ABIs */
106 } else if (abi0 == MIPS_ABI_FP_OLD_64) {
107 switch (abi1) {
108 case MIPS_ABI_FP_XX:
109 case MIPS_ABI_FP_64:
110 case MIPS_ABI_FP_64A:
111 state->overall_abi = MIPS_ABI_FP_64;
112 break;
113 }
114 } else if (abi0 == MIPS_ABI_FP_XX ||
115 abi0 == MIPS_ABI_FP_64 ||
116 abi0 == MIPS_ABI_FP_64A) {
117 state->overall_abi = MIPS_ABI_FP_64;
118 }
119 169
120 switch (state->overall_abi) { 170 if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
121 case MIPS_ABI_FP_64: 171 (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN))
122 case MIPS_ABI_FP_64A: 172 return -ELIBBAD;
123 case FP_DOUBLE_64A: 173
124 if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) 174 /* It's time to determine the FPU mode requirements */
125 return -ELIBBAD; 175 prog_req = (abi0 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi0];
126 break; 176 interp_req = (abi1 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi1];
127 177
128 case FP_ERROR: 178 /*
179 * Check whether the program's and interp's ABIs have a matching FPU
180 * mode requirement.
181 */
182 prog_req.single = interp_req.single && prog_req.single;
183 prog_req.soft = interp_req.soft && prog_req.soft;
184 prog_req.fr1 = interp_req.fr1 && prog_req.fr1;
185 prog_req.frdefault = interp_req.frdefault && prog_req.frdefault;
186 prog_req.fre = interp_req.fre && prog_req.fre;
187
188 /*
189 * Determine the desired FPU mode
190 *
191 * Decision making:
192 *
193 * - We want FR_FRE if FRE=1 and both FR=1 and FR=0 are false. This
194 * means that we have a combination of program and interpreter
195 * that inherently require the hybrid FP mode.
196 * - If FR1 and FRDEFAULT is true, that means we hit the any-abi or
197 * fpxx case. This is because, in any-ABI (or no-ABI) we have no FPU
198 * instructions so we don't care about the mode. We will simply use
199 * the one preferred by the hardware. In fpxx case, that ABI can
200 * handle both FR=1 and FR=0, so, again, we simply choose the one
 201 * preferred by the hardware. Next, if the code will only use
 202 * single-precision FPU instructions (single is true) while the
 203 * default ABI FPU mode does not suit it (frdefault is false),
 204 * as in the single + any-ABI combination, we once again set the
 205 * FPU mode to the one preferred by the hardware, since single-
 206 * precision code executes correctly under either FR=0 or
 207 * FR=1.
208 * - We want FP_FR1 if that's the only matching mode and the default one
209 * is not good.
 210 * - Return with -ELIBBAD if we can't find a matching FPU mode.
211 */
212 if (prog_req.fre && !prog_req.frdefault && !prog_req.fr1)
213 state->overall_fp_mode = FP_FRE;
214 else if ((prog_req.fr1 && prog_req.frdefault) ||
215 (prog_req.single && !prog_req.frdefault))
216 /* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
217 state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
218 cpu_has_mips_r2_r6) ?
219 FP_FR1 : FP_FR0;
220 else if (prog_req.fr1)
221 state->overall_fp_mode = FP_FR1;
222 else if (!prog_req.fre && !prog_req.frdefault &&
223 !prog_req.fr1 && !prog_req.single && !prog_req.soft)
129 return -ELIBBAD; 224 return -ELIBBAD;
130 }
131 225
132 return 0; 226 return 0;
133} 227}
134 228
135void mips_set_personality_fp(struct arch_elf_state *state) 229static inline void set_thread_fp_mode(int hybrid, int regs32)
136{ 230{
137 if (config_enabled(CONFIG_FP32XX_HYBRID_FPRS)) { 231 if (hybrid)
138 /* 232 set_thread_flag(TIF_HYBRID_FPREGS);
139 * Use hybrid FPRs for all code which can correctly execute 233 else
140 * with that mode.
141 */
142 switch (state->overall_abi) {
143 case MIPS_ABI_FP_DOUBLE:
144 case MIPS_ABI_FP_SINGLE:
145 case MIPS_ABI_FP_SOFT:
146 case MIPS_ABI_FP_XX:
147 case MIPS_ABI_FP_ANY:
148 /* FR=1, FRE=1 */
149 clear_thread_flag(TIF_32BIT_FPREGS);
150 set_thread_flag(TIF_HYBRID_FPREGS);
151 return;
152 }
153 }
154
155 switch (state->overall_abi) {
156 case MIPS_ABI_FP_DOUBLE:
157 case MIPS_ABI_FP_SINGLE:
158 case MIPS_ABI_FP_SOFT:
159 /* FR=0 */
160 set_thread_flag(TIF_32BIT_FPREGS);
161 clear_thread_flag(TIF_HYBRID_FPREGS); 234 clear_thread_flag(TIF_HYBRID_FPREGS);
162 break; 235 if (regs32)
163 236 set_thread_flag(TIF_32BIT_FPREGS);
164 case FP_DOUBLE_64A: 237 else
165 /* FR=1, FRE=1 */
166 clear_thread_flag(TIF_32BIT_FPREGS); 238 clear_thread_flag(TIF_32BIT_FPREGS);
167 set_thread_flag(TIF_HYBRID_FPREGS); 239}
168 break;
169 240
170 case MIPS_ABI_FP_64: 241void mips_set_personality_fp(struct arch_elf_state *state)
171 case MIPS_ABI_FP_64A: 242{
172 /* FR=1, FRE=0 */ 243 /*
173 clear_thread_flag(TIF_32BIT_FPREGS); 244 * This function is only ever called for O32 ELFs so we should
174 clear_thread_flag(TIF_HYBRID_FPREGS); 245 * not be worried about N32/N64 binaries.
175 break; 246 */
176 247
177 case MIPS_ABI_FP_XX: 248 if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
178 case MIPS_ABI_FP_ANY: 249 return;
179 if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
180 set_thread_flag(TIF_32BIT_FPREGS);
181 else
182 clear_thread_flag(TIF_32BIT_FPREGS);
183 250
184 clear_thread_flag(TIF_HYBRID_FPREGS); 251 switch (state->overall_fp_mode) {
252 case FP_FRE:
253 set_thread_fp_mode(1, 0);
254 break;
255 case FP_FR0:
256 set_thread_fp_mode(0, 1);
257 break;
258 case FP_FR1:
259 set_thread_fp_mode(0, 0);
185 break; 260 break;
186
187 default: 261 default:
188 case FP_ERROR:
189 BUG(); 262 BUG();
190 } 263 }
191} 264}
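
The heart of the new arch_check_elf() is a per-mode AND of the program's and interpreter's requirement vectors, followed by the priority rules spelled out in the comment block. A stand-alone sketch of the intersection step, reusing the MIPS_ABI_FP_DOUBLE and MIPS_ABI_FP_XX rows of fpu_reqs shown above:

    #include <stdbool.h>
    #include <stdio.h>

    struct mode_req { bool single, soft, fr1, frdefault, fre; };

    static struct mode_req intersect(struct mode_req a, struct mode_req b)
    {
        struct mode_req r = {
            a.single && b.single,
            a.soft && b.soft,
            a.fr1 && b.fr1,
            a.frdefault && b.frdefault,
            a.fre && b.fre,
        };
        return r;
    }

    int main(void)
    {
        struct mode_req fp_double = { false, false, false, true, true };
        struct mode_req fp_xx     = { false, false, true,  true, true };
        struct mode_req r = intersect(fp_double, fp_xx);

        /* FRDEFAULT and FRE survive: FPXX objects link with FP32 code. */
        printf("fr1=%d frdefault=%d fre=%d\n", r.fr1, r.frdefault, r.fre);
        return 0;
    }
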
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 4353d323f017..af41ba6db960 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -46,6 +46,11 @@ resume_userspace:
46 local_irq_disable # make sure we dont miss an 46 local_irq_disable # make sure we dont miss an
47 # interrupt setting need_resched 47 # interrupt setting need_resched
48 # between sampling and return 48 # between sampling and return
49#ifdef CONFIG_MIPSR2_TO_R6_EMULATOR
50 lw k0, TI_R2_EMUL_RET($28)
51 bnez k0, restore_all_from_r2_emul
52#endif
53
49 LONG_L a2, TI_FLAGS($28) # current->work 54 LONG_L a2, TI_FLAGS($28) # current->work
50 andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace) 55 andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace)
51 bnez t0, work_pending 56 bnez t0, work_pending
@@ -114,6 +119,19 @@ restore_partial: # restore partial frame
114 RESTORE_SP_AND_RET 119 RESTORE_SP_AND_RET
115 .set at 120 .set at
116 121
122#ifdef CONFIG_MIPSR2_TO_R6_EMULATOR
123restore_all_from_r2_emul: # restore full frame
124 .set noat
125 sw zero, TI_R2_EMUL_RET($28) # reset it
126 RESTORE_TEMP
127 RESTORE_AT
128 RESTORE_STATIC
129 RESTORE_SOME
130 LONG_L sp, PT_R29(sp)
131 eretnc
132 .set at
133#endif
134
117work_pending: 135work_pending:
118 andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS 136 andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
119 beqz t0, work_notifysig 137 beqz t0, work_notifysig
@@ -158,7 +176,8 @@ syscall_exit_work:
158 jal syscall_trace_leave 176 jal syscall_trace_leave
159 b resume_userspace 177 b resume_userspace
160 178
161#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT) 179#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || \
180 defined(CONFIG_MIPS_MT)
162 181
163/* 182/*
164 * MIPS32R2 Instruction Hazard Barrier - must be called 183 * MIPS32R2 Instruction Hazard Barrier - must be called
@@ -171,4 +190,4 @@ LEAF(mips_ihb)
171 nop 190 nop
172 END(mips_ihb) 191 END(mips_ihb)
173 192
174#endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */ 193#endif /* CONFIG_CPU_MIPSR2 or CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */
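
The new test in resume_userspace is just a per-thread boolean: when the R2 emulator has built a full register frame, the exit path must restore everything and leave via ERETNC. A plain-C model of the dispatch; the struct and function here are illustrative, not kernel definitions:

    #include <stdio.h>

    struct thread_info_sketch { int r2_emul_return; };

    static const char *resume_path(struct thread_info_sketch *ti)
    {
        if (ti->r2_emul_return) {
            ti->r2_emul_return = 0;    /* reset it, as entry.S does */
            return "restore_all_from_r2_emul";
        }
        return "normal work_pending checks";
    }

    int main(void)
    {
        struct thread_info_sketch ti = { 1 };
        printf("%s\n", resume_path(&ti));
        return 0;
    }
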
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index a5e26dd90592..2ebaabe3af15 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -125,7 +125,7 @@ LEAF(__r4k_wait)
125 nop 125 nop
126 nop 126 nop
127#endif 127#endif
128 .set arch=r4000 128 .set MIPS_ISA_ARCH_LEVEL_RAW
129 wait 129 wait
130 /* end of rollback region (the region size must be power of two) */ 130 /* end of rollback region (the region size must be power of two) */
1311: 1311:
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 0b9082b6b683..368c88b7eb6c 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -186,6 +186,7 @@ void __init check_wait(void)
186 case CPU_PROAPTIV: 186 case CPU_PROAPTIV:
187 case CPU_P5600: 187 case CPU_P5600:
188 case CPU_M5150: 188 case CPU_M5150:
189 case CPU_QEMU_GENERIC:
189 cpu_wait = r4k_wait; 190 cpu_wait = r4k_wait;
190 if (read_c0_config7() & MIPS_CONF7_WII) 191 if (read_c0_config7() & MIPS_CONF7_WII)
191 cpu_wait = r4k_wait_irqoff; 192 cpu_wait = r4k_wait_irqoff;
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
new file mode 100644
index 000000000000..64d17e41093b
--- /dev/null
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -0,0 +1,2378 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2014 Imagination Technologies Ltd.
7 * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
8 * Author: Markos Chandras <markos.chandras@imgtec.com>
9 *
10 * MIPS R2 user space instruction emulator for MIPS R6
11 *
12 */
13#include <linux/bug.h>
14#include <linux/compiler.h>
15#include <linux/debugfs.h>
16#include <linux/init.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/ptrace.h>
20#include <linux/seq_file.h>
21
22#include <asm/asm.h>
23#include <asm/branch.h>
24#include <asm/break.h>
25#include <asm/fpu.h>
26#include <asm/fpu_emulator.h>
27#include <asm/inst.h>
28#include <asm/mips-r2-to-r6-emul.h>
29#include <asm/local.h>
30#include <asm/ptrace.h>
31#include <asm/uaccess.h>
32
33#ifdef CONFIG_64BIT
34#define ADDIU "daddiu "
35#define INS "dins "
36#define EXT "dext "
37#else
38#define ADDIU "addiu "
39#define INS "ins "
40#define EXT "ext "
41#endif /* CONFIG_64BIT */
42
43#define SB "sb "
44#define LB "lb "
45#define LL "ll "
46#define SC "sc "
47
48DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
49DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
50DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);
51
52extern const unsigned int fpucondbit[8];
53
54#define MIPS_R2_EMUL_TOTAL_PASS 10
55
56int mipsr2_emulation = 0;
57
58static int __init mipsr2emu_enable(char *s)
59{
60 mipsr2_emulation = 1;
61
 62 pr_info("MIPS R2-to-R6 Emulator Enabled!\n");
63
64 return 1;
65}
66__setup("mipsr2emu", mipsr2emu_enable);
67
68/**
 69 * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in the delay slot
 70 * for performance, instead of the traditional and rather slow way of
 71 * using a stack trampoline.
72 * @regs: Process register set
73 * @ir: Instruction
74 */
75static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
76{
77 switch (MIPSInst_OPCODE(ir)) {
78 case addiu_op:
79 if (MIPSInst_RT(ir))
80 regs->regs[MIPSInst_RT(ir)] =
81 (s32)regs->regs[MIPSInst_RS(ir)] +
82 (s32)MIPSInst_SIMM(ir);
83 return 0;
84 case daddiu_op:
85 if (config_enabled(CONFIG_32BIT))
86 break;
87
88 if (MIPSInst_RT(ir))
89 regs->regs[MIPSInst_RT(ir)] =
90 (s64)regs->regs[MIPSInst_RS(ir)] +
91 (s64)MIPSInst_SIMM(ir);
92 return 0;
93 case lwc1_op:
94 case swc1_op:
95 case cop1_op:
96 case cop1x_op:
97 /* FPU instructions in delay slot */
98 return -SIGFPE;
99 case spec_op:
100 switch (MIPSInst_FUNC(ir)) {
101 case or_op:
102 if (MIPSInst_RD(ir))
103 regs->regs[MIPSInst_RD(ir)] =
104 regs->regs[MIPSInst_RS(ir)] |
105 regs->regs[MIPSInst_RT(ir)];
106 return 0;
107 case sll_op:
108 if (MIPSInst_RS(ir))
109 break;
110
111 if (MIPSInst_RD(ir))
112 regs->regs[MIPSInst_RD(ir)] =
113 (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) <<
114 MIPSInst_FD(ir));
115 return 0;
116 case srl_op:
117 if (MIPSInst_RS(ir))
118 break;
119
120 if (MIPSInst_RD(ir))
121 regs->regs[MIPSInst_RD(ir)] =
122 (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >>
123 MIPSInst_FD(ir));
124 return 0;
125 case addu_op:
126 if (MIPSInst_FD(ir))
127 break;
128
129 if (MIPSInst_RD(ir))
130 regs->regs[MIPSInst_RD(ir)] =
131 (s32)((u32)regs->regs[MIPSInst_RS(ir)] +
132 (u32)regs->regs[MIPSInst_RT(ir)]);
133 return 0;
134 case subu_op:
135 if (MIPSInst_FD(ir))
136 break;
137
138 if (MIPSInst_RD(ir))
139 regs->regs[MIPSInst_RD(ir)] =
140 (s32)((u32)regs->regs[MIPSInst_RS(ir)] -
141 (u32)regs->regs[MIPSInst_RT(ir)]);
142 return 0;
143 case dsll_op:
144 if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
145 break;
146
147 if (MIPSInst_RD(ir))
148 regs->regs[MIPSInst_RD(ir)] =
149 (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) <<
150 MIPSInst_FD(ir));
151 return 0;
152 case dsrl_op:
153 if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
154 break;
155
156 if (MIPSInst_RD(ir))
157 regs->regs[MIPSInst_RD(ir)] =
158 (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >>
159 MIPSInst_FD(ir));
160 return 0;
161 case daddu_op:
162 if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
163 break;
164
165 if (MIPSInst_RD(ir))
166 regs->regs[MIPSInst_RD(ir)] =
167 (u64)regs->regs[MIPSInst_RS(ir)] +
168 (u64)regs->regs[MIPSInst_RT(ir)];
169 return 0;
170 case dsubu_op:
171 if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
172 break;
173
174 if (MIPSInst_RD(ir))
175 regs->regs[MIPSInst_RD(ir)] =
176 (s64)((u64)regs->regs[MIPSInst_RS(ir)] -
177 (u64)regs->regs[MIPSInst_RT(ir)]);
178 return 0;
179 }
180 break;
181 default:
182 pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n",
183 ir, MIPSInst_OPCODE(ir));
184 }
185
186 return SIGILL;
187}
188
189/**
 190 * movf_func - Emulate a MOVF instruction
191 * @regs: Process register set
192 * @ir: Instruction
193 *
194 * Returns 0 since it always succeeds.
195 */
196static int movf_func(struct pt_regs *regs, u32 ir)
197{
198 u32 csr;
199 u32 cond;
200
201 csr = current->thread.fpu.fcr31;
202 cond = fpucondbit[MIPSInst_RT(ir) >> 2];
203 if (((csr & cond) == 0) && MIPSInst_RD(ir))
204 regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
205 MIPS_R2_STATS(movs);
206 return 0;
207}
208
209/**
210 * movt_func - Emulate a MOVT instruction
211 * @regs: Process register set
212 * @ir: Instruction
213 *
214 * Returns 0 since it always succeeds.
215 */
216static int movt_func(struct pt_regs *regs, u32 ir)
217{
218 u32 csr;
219 u32 cond;
220
221 csr = current->thread.fpu.fcr31;
222 cond = fpucondbit[MIPSInst_RT(ir) >> 2];
223
224 if (((csr & cond) != 0) && MIPSInst_RD(ir))
225 regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
226
227 MIPS_R2_STATS(movs);
228
229 return 0;
230}
231
232/**
233 * jr_func - Emulate a JR instruction.
 234 * @regs: Process register set
 235 * @ir: Instruction
 236 *
 237 * Returns SIGILL if JR was in a delay slot, SIGEMT if we
238 * can't compute the EPC, SIGSEGV if we can't access the
239 * userland instruction or 0 on success.
240 */
241static int jr_func(struct pt_regs *regs, u32 ir)
242{
243 int err;
244 unsigned long cepc, epc, nepc;
245 u32 nir;
246
247 if (delay_slot(regs))
248 return SIGILL;
249
250 /* EPC after the RI/JR instruction */
251 nepc = regs->cp0_epc;
252 /* Roll back to the reserved R2 JR instruction */
253 regs->cp0_epc -= 4;
254 epc = regs->cp0_epc;
255 err = __compute_return_epc(regs);
256
257 if (err < 0)
258 return SIGEMT;
259
260
261 /* Computed EPC */
262 cepc = regs->cp0_epc;
263
264 /* Get DS instruction */
265 err = __get_user(nir, (u32 __user *)nepc);
266 if (err)
267 return SIGSEGV;
268
269 MIPS_R2BR_STATS(jrs);
270
271 /* If nir == 0(NOP), then nothing else to do */
272 if (nir) {
273 /*
274 * Negative err means FPU instruction in BD-slot,
275 * Zero err means 'BD-slot emulation done'
276 * For anything else we go back to trampoline emulation.
277 */
278 err = mipsr6_emul(regs, nir);
279 if (err > 0) {
280 regs->cp0_epc = nepc;
281 err = mips_dsemul(regs, nir, cepc);
282 if (err == SIGILL)
283 err = SIGEMT;
284 MIPS_R2_STATS(dsemul);
285 }
286 }
287
288 return err;
289}
290
291/**
292 * movz_func - Emulate a MOVZ instruction
293 * @regs: Process register set
294 * @ir: Instruction
295 *
296 * Returns 0 since it always succeeds.
297 */
298static int movz_func(struct pt_regs *regs, u32 ir)
299{
300 if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir))
301 regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
302 MIPS_R2_STATS(movs);
303
304 return 0;
305}
306
307/**
 308 * movn_func - Emulate a MOVN instruction
309 * @regs: Process register set
310 * @ir: Instruction
311 *
312 * Returns 0 since it always succeeds.
313 */
314static int movn_func(struct pt_regs *regs, u32 ir)
315{
316 if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir))
317 regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
318 MIPS_R2_STATS(movs);
319
320 return 0;
321}
322
323/**
324 * mfhi_func - Emulate a MFHI instruction
325 * @regs: Process register set
326 * @ir: Instruction
327 *
328 * Returns 0 since it always succeeds.
329 */
330static int mfhi_func(struct pt_regs *regs, u32 ir)
331{
332 if (MIPSInst_RD(ir))
333 regs->regs[MIPSInst_RD(ir)] = regs->hi;
334
335 MIPS_R2_STATS(hilo);
336
337 return 0;
338}
339
340/**
341 * mthi_func - Emulate a MTHI instruction
342 * @regs: Process register set
343 * @ir: Instruction
344 *
345 * Returns 0 since it always succeeds.
346 */
347static int mthi_func(struct pt_regs *regs, u32 ir)
348{
349 regs->hi = regs->regs[MIPSInst_RS(ir)];
350
351 MIPS_R2_STATS(hilo);
352
353 return 0;
354}
355
356/**
357 * mflo_func - Emulate a MFLO instruction
358 * @regs: Process register set
359 * @ir: Instruction
360 *
361 * Returns 0 since it always succeeds.
362 */
363static int mflo_func(struct pt_regs *regs, u32 ir)
364{
365 if (MIPSInst_RD(ir))
366 regs->regs[MIPSInst_RD(ir)] = regs->lo;
367
368 MIPS_R2_STATS(hilo);
369
370 return 0;
371}
372
373/**
374 * mtlo_func - Emulate a MTLO instruction
375 * @regs: Process register set
376 * @ir: Instruction
377 *
378 * Returns 0 since it always succeeds.
379 */
380static int mtlo_func(struct pt_regs *regs, u32 ir)
381{
382 regs->lo = regs->regs[MIPSInst_RS(ir)];
383
384 MIPS_R2_STATS(hilo);
385
386 return 0;
387}
388
389/**
390 * mult_func - Emulate a MULT instruction
391 * @regs: Process register set
392 * @ir: Instruction
393 *
394 * Returns 0 since it always succeeds.
395 */
396static int mult_func(struct pt_regs *regs, u32 ir)
397{
398 s64 res;
399 s32 rt, rs;
400
401 rt = regs->regs[MIPSInst_RT(ir)];
402 rs = regs->regs[MIPSInst_RS(ir)];
403 res = (s64)rt * (s64)rs;
404
405 rs = res;
406 regs->lo = (s64)rs;
407 rt = res >> 32;
408 res = (s64)rt;
409 regs->hi = res;
410
411 MIPS_R2_STATS(muls);
412
413 return 0;
414}
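The cast shuffle in mult_func() is just splitting the 64-bit product into two sign-extended 32-bit halves; the same computation written directly, as a sketch with hypothetical names:

/* Equivalent split, written directly; not a drop-in replacement. */
static void mult_split_sketch(s32 rs_val, s32 rt_val, s64 *hi, s64 *lo)
{
	s64 prod = (s64)rs_val * (s64)rt_val;	/* full 64-bit product */

	*lo = (s64)(s32)prod;		/* bits 31..0, sign-extended */
	*hi = (s64)(s32)(prod >> 32);	/* bits 63..32, sign-extended */
}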
415
416/**
417 * multu_func - Emulate a MULTU instruction
418 * @regs: Process register set
419 * @ir: Instruction
420 *
421 * Returns 0 since it always succeeds.
422 */
423static int multu_func(struct pt_regs *regs, u32 ir)
424{
425 u64 res;
426 u32 rt, rs;
427
428 rt = regs->regs[MIPSInst_RT(ir)];
429 rs = regs->regs[MIPSInst_RS(ir)];
430 res = (u64)rt * (u64)rs;
431 rt = res;
432 regs->lo = (s64)rt;
433 regs->hi = (s64)(res >> 32);
434
435 MIPS_R2_STATS(muls);
436
437 return 0;
438}
439
440/**
441 * div_func - Emulate a DIV instruction
442 * @regs: Process register set
443 * @ir: Instruction
444 *
445 * Returns 0 since it always succeeds.
446 */
447static int div_func(struct pt_regs *regs, u32 ir)
448{
449 s32 rt, rs;
450
451 rt = regs->regs[MIPSInst_RT(ir)];
452 rs = regs->regs[MIPSInst_RS(ir)];
453
454 regs->lo = (s64)(rs / rt);
455 regs->hi = (s64)(rs % rt);
456
457 MIPS_R2_STATS(divs);
458
459 return 0;
460}
461
462/**
463 * divu_func - Emulate a DIVU instruction
464 * @regs: Process register set
465 * @ir: Instruction
466 *
467 * Returns 0 since it always succeeds.
468 */
469static int divu_func(struct pt_regs *regs, u32 ir)
470{
471 u32 rt, rs;
472
473 rt = regs->regs[MIPSInst_RT(ir)];
474 rs = regs->regs[MIPSInst_RS(ir)];
475
476 regs->lo = (s64)(rs / rt);
477 regs->hi = (s64)(rs % rt);
478
479 MIPS_R2_STATS(divs);
480
481 return 0;
482}
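DIV and DIVU share one convention: the quotient lands in LO and the remainder in HI. A minimal sketch (the zero-divisor guard is purely illustrative; architecturally a zero divisor is UNPREDICTABLE, and the emulation above, like the hardware, makes no promise about it):

/* Sketch of the DIV result convention; hypothetical helper. */
static void div_sketch(s32 rs, s32 rt, s64 *lo, s64 *hi)
{
	if (rt == 0)
		return;		/* UNPREDICTABLE on real hardware */
	*lo = rs / rt;		/* quotient  -> LO */
	*hi = rs % rt;		/* remainder -> HI */
}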
483
484/**
485 * dmult_func - Emulate a DMULT instruction
486 * @regs: Process register set
487 * @ir: Instruction
488 *
489 * Returns 0 on success or SIGILL for 32-bit kernels.
490 */
491static int dmult_func(struct pt_regs *regs, u32 ir)
492{
493 s64 res;
494 s64 rt, rs;
495
496 if (config_enabled(CONFIG_32BIT))
497 return SIGILL;
498
499 rt = regs->regs[MIPSInst_RT(ir)];
500 rs = regs->regs[MIPSInst_RS(ir)];
501 res = rt * rs;
502
503 regs->lo = res;
504 __asm__ __volatile__(
505 "dmuh %0, %1, %2\t\n"
506 : "=r"(res)
507 : "r"(rt), "r"(rs));
508
509 regs->hi = res;
510
511 MIPS_R2_STATS(muls);
512
513 return 0;
514}
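The inline asm reaches for the R6 DMUH instruction because C has no portable 64x64->128 high multiply. Assuming a compiler that provides __int128, the value it computes can be sketched as:

/* Sketch only; the kernel uses the dmuh/dmuhu instructions directly. */
static inline s64 dmuh_sketch(s64 rs, s64 rt)
{
	/* upper 64 bits of the 128-bit signed product (DMUHU is the
	 * same computation with unsigned __int128) */
	return (s64)(((__int128)rs * (__int128)rt) >> 64);
}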
515
516/**
517 * dmultu_func - Emulate a DMULTU instruction
518 * @regs: Process register set
519 * @ir: Instruction
520 *
521 * Returns 0 on success or SIGILL for 32-bit kernels.
522 */
523static int dmultu_func(struct pt_regs *regs, u32 ir)
524{
525 u64 res;
526 u64 rt, rs;
527
528 if (config_enabled(CONFIG_32BIT))
529 return SIGILL;
530
531 rt = regs->regs[MIPSInst_RT(ir)];
532 rs = regs->regs[MIPSInst_RS(ir)];
533 res = rt * rs;
534
535 regs->lo = res;
536 __asm__ __volatile__(
537 "dmuhu %0, %1, %2\t\n"
538 : "=r"(res)
539 : "r"(rt), "r"(rs));
540
541 regs->hi = res;
542
543 MIPS_R2_STATS(muls);
544
545 return 0;
546}
547
548/**
549 * ddiv_func - Emulate a DDIV instruction
550 * @regs: Process register set
551 * @ir: Instruction
552 *
553 * Returns 0 on success or SIGILL for 32-bit kernels.
554 */
555static int ddiv_func(struct pt_regs *regs, u32 ir)
556{
557 s64 rt, rs;
558
559 if (config_enabled(CONFIG_32BIT))
560 return SIGILL;
561
562 rt = regs->regs[MIPSInst_RT(ir)];
563 rs = regs->regs[MIPSInst_RS(ir)];
564
565 regs->lo = rs / rt;
566 regs->hi = rs % rt;
567
568 MIPS_R2_STATS(divs);
569
570 return 0;
571}
572
573/**
574 * ddivu_func - Emulate a DDIVU instruction
575 * @regs: Process register set
576 * @ir: Instruction
577 *
578 * Returns 0 on success or SIGILL for 32-bit kernels.
579 */
580static int ddivu_func(struct pt_regs *regs, u32 ir)
581{
582 u64 rt, rs;
583
584 if (config_enabled(CONFIG_32BIT))
585 return SIGILL;
586
587 rt = regs->regs[MIPSInst_RT(ir)];
588 rs = regs->regs[MIPSInst_RS(ir)];
589
590 regs->lo = rs / rt;
591 regs->hi = rs % rt;
592
593 MIPS_R2_STATS(divs);
594
595 return 0;
596}
597
598/* R6 removed instructions for the SPECIAL opcode */
599static struct r2_decoder_table spec_op_table[] = {
600 { 0xfc1ff83f, 0x00000008, jr_func },
601 { 0xfc00ffff, 0x00000018, mult_func },
602 { 0xfc00ffff, 0x00000019, multu_func },
603 { 0xfc00ffff, 0x0000001c, dmult_func },
604 { 0xfc00ffff, 0x0000001d, dmultu_func },
605 { 0xffff07ff, 0x00000010, mfhi_func },
606 { 0xfc1fffff, 0x00000011, mthi_func },
607 { 0xffff07ff, 0x00000012, mflo_func },
608 { 0xfc1fffff, 0x00000013, mtlo_func },
609 { 0xfc0307ff, 0x00000001, movf_func },
610 { 0xfc0307ff, 0x00010001, movt_func },
611 { 0xfc0007ff, 0x0000000a, movz_func },
612 { 0xfc0007ff, 0x0000000b, movn_func },
613 { 0xfc00ffff, 0x0000001a, div_func },
614 { 0xfc00ffff, 0x0000001b, divu_func },
615 { 0xfc00ffff, 0x0000001e, ddiv_func },
616 { 0xfc00ffff, 0x0000001f, ddivu_func },
617 {}
618};
619
620/**
621 * madd_func - Emulate a MADD instruction
622 * @regs: Process register set
623 * @ir: Instruction
624 *
625 * Returns 0 since it always succeeds.
626 */
627static int madd_func(struct pt_regs *regs, u32 ir)
628{
629 s64 res;
630 s32 rt, rs;
631
632 rt = regs->regs[MIPSInst_RT(ir)];
633 rs = regs->regs[MIPSInst_RS(ir)];
634 res = (s64)rt * (s64)rs;
635 rt = regs->hi;
636 rs = regs->lo;
637 res += ((((s64)rt) << 32) | (u32)rs);
638
639 rt = res;
640 regs->lo = (s64)rt;
641 rs = res >> 32;
642 regs->hi = (s64)rs;
643
644 MIPS_R2_STATS(dsps);
645
646 return 0;
647}
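MADD, MADDU, MSUB and MSUBU all treat HI:LO as a single 64-bit accumulator; the signed update step in isolation (a sketch with hypothetical names):

/* The accumulate step in isolation. */
static void madd_step_sketch(s32 rs_val, s32 rt_val, s64 *hi, s64 *lo)
{
	s64 acc = (((s64)(s32)*hi) << 32) | (u32)*lo;	/* HI:LO */

	acc += (s64)rs_val * (s64)rt_val;	/* MADD; MSUB subtracts */
	*lo = (s64)(s32)acc;			/* low 32, sign-extended */
	*hi = (s64)(s32)(acc >> 32);		/* high 32, sign-extended */
}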
648
649/**
650 * maddu_func - Emulate a MADDU instruction
651 * @regs: Process register set
652 * @ir: Instruction
653 *
654 * Returns 0 since it always succeeds.
655 */
656static int maddu_func(struct pt_regs *regs, u32 ir)
657{
658 u64 res;
659 u32 rt, rs;
660
661 rt = regs->regs[MIPSInst_RT(ir)];
662 rs = regs->regs[MIPSInst_RS(ir)];
663 res = (u64)rt * (u64)rs;
664 rt = regs->hi;
665 rs = regs->lo;
666 res += ((((s64)rt) << 32) | (u32)rs);
667
668 rt = res;
669 regs->lo = (s64)rt;
670 rs = res >> 32;
671 regs->hi = (s64)rs;
672
673 MIPS_R2_STATS(dsps);
674
675 return 0;
676}
677
678/**
679 * msub_func - Emulate a MSUB instruction
680 * @regs: Process register set
681 * @ir: Instruction
682 *
683 * Returns 0 since it always succeeds.
684 */
685static int msub_func(struct pt_regs *regs, u32 ir)
686{
687 s64 res;
688 s32 rt, rs;
689
690 rt = regs->regs[MIPSInst_RT(ir)];
691 rs = regs->regs[MIPSInst_RS(ir)];
692 res = (s64)rt * (s64)rs;
693 rt = regs->hi;
694 rs = regs->lo;
695 res = ((((s64)rt) << 32) | (u32)rs) - res;
696
697 rt = res;
698 regs->lo = (s64)rt;
699 rs = res >> 32;
700 regs->hi = (s64)rs;
701
702 MIPS_R2_STATS(dsps);
703
704 return 0;
705}
706
707/**
708 * msubu_func - Emulate a MSUBU instruction
709 * @regs: Process register set
710 * @ir: Instruction
711 *
712 * Returns 0 since it always succeeds.
713 */
714static int msubu_func(struct pt_regs *regs, u32 ir)
715{
716 u64 res;
717 u32 rt, rs;
718
719 rt = regs->regs[MIPSInst_RT(ir)];
720 rs = regs->regs[MIPSInst_RS(ir)];
721 res = (u64)rt * (u64)rs;
722 rt = regs->hi;
723 rs = regs->lo;
724 res = ((((s64)rt) << 32) | (u32)rs) - res;
725
726 rt = res;
727 regs->lo = (s64)rt;
728 rs = res >> 32;
729 regs->hi = (s64)rs;
730
731 MIPS_R2_STATS(dsps);
732
733 return 0;
734}
735
736/**
737 * mul_func - Emulate a MUL instruction
738 * @regs: Process register set
739 * @ir: Instruction
740 *
741 * Returns 0 since it always succeeds.
742 */
743static int mul_func(struct pt_regs *regs, u32 ir)
744{
745 s64 res;
746 s32 rt, rs;
747
748 if (!MIPSInst_RD(ir))
749 return 0;
750 rt = regs->regs[MIPSInst_RT(ir)];
751 rs = regs->regs[MIPSInst_RS(ir)];
752 res = (s64)rt * (s64)rs;
753
754 rs = res;
755 regs->regs[MIPSInst_RD(ir)] = (s64)rs;
756
757 MIPS_R2_STATS(muls);
758
759 return 0;
760}
761
762/**
763 * clz_func - Emulate a CLZ instruction
764 * @regs: Process register set
765 * @ir: Instruction
766 *
767 * Returns 0 since it always succeeds.
768 */
769static int clz_func(struct pt_regs *regs, u32 ir)
770{
771 u32 res;
772 u32 rs;
773
774 if (!MIPSInst_RD(ir))
775 return 0;
776
777 rs = regs->regs[MIPSInst_RS(ir)];
778 __asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs));
779 regs->regs[MIPSInst_RD(ir)] = res;
780
781 MIPS_R2_STATS(bops);
782
783 return 0;
784}
785
786/**
787 * clo_func - Emulate a CLO instruction
788 * @regs: Process register set
789 * @ir: Instruction
790 *
791 * Returns 0 since it always succeeds.
792 */
794static int clo_func(struct pt_regs *regs, u32 ir)
795{
796 u32 res;
797 u32 rs;
798
799 if (!MIPSInst_RD(ir))
800 return 0;
801
802 rs = regs->regs[MIPSInst_RS(ir)];
803 __asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs));
804 regs->regs[MIPSInst_RD(ir)] = res;
805
806 MIPS_R2_STATS(bops);
807
808 return 0;
809}
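Outside the kernel the same counts fall out of a compiler builtin; a portable sketch (note that __builtin_clz(0) is undefined in C, whereas the MIPS instruction defines CLZ of zero as 32):

static unsigned int clz32_sketch(u32 x)
{
	return x ? (unsigned int)__builtin_clz(x) : 32;
}

static unsigned int clo32_sketch(u32 x)	/* count leading ones */
{
	return clz32_sketch(~x);
}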
810
811/**
812 * dclz_func - Emulate a DCLZ instruction
813 * @regs: Process register set
814 * @ir: Instruction
815 *
816 * Returns 0 since it always succeeds.
817 */
818static int dclz_func(struct pt_regs *regs, u32 ir)
819{
820 u64 res;
821 u64 rs;
822
823 if (config_enabled(CONFIG_32BIT))
824 return SIGILL;
825
826 if (!MIPSInst_RD(ir))
827 return 0;
828
829 rs = regs->regs[MIPSInst_RS(ir)];
830 __asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs));
831 regs->regs[MIPSInst_RD(ir)] = res;
832
833 MIPS_R2_STATS(bops);
834
835 return 0;
836}
837
838/**
839 * dclo_func - Emulate a DCLO instruction
840 * @regs: Process register set
841 * @ir: Instruction
842 *
843 * Returns 0 since it always succeeds.
844 */
845static int dclo_func(struct pt_regs *regs, u32 ir)
846{
847 u64 res;
848 u64 rs;
849
850 if (config_enabled(CONFIG_32BIT))
851 return SIGILL;
852
853 if (!MIPSInst_RD(ir))
854 return 0;
855
856 rs = regs->regs[MIPSInst_RS(ir)];
857 __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs));
858 regs->regs[MIPSInst_RD(ir)] = res;
859
860 MIPS_R2_STATS(bops);
861
862 return 0;
863}
864
865/* R6 removed instructions for the SPECIAL2 opcode */
866static struct r2_decoder_table spec2_op_table[] = {
867 { 0xfc00ffff, 0x70000000, madd_func },
868 { 0xfc00ffff, 0x70000001, maddu_func },
869 { 0xfc0007ff, 0x70000002, mul_func },
870 { 0xfc00ffff, 0x70000004, msub_func },
871 { 0xfc00ffff, 0x70000005, msubu_func },
872 { 0xfc0007ff, 0x70000020, clz_func },
873 { 0xfc0007ff, 0x70000021, clo_func },
874 { 0xfc0007ff, 0x70000024, dclz_func },
875 { 0xfc0007ff, 0x70000025, dclo_func },
876 { }
877};
878
879static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
880 struct r2_decoder_table *table)
881{
882 struct r2_decoder_table *p;
883 int err;
884
885 for (p = table; p->func; p++) {
886 if ((inst & p->mask) == p->code) {
887 err = (p->func)(regs, inst);
888 return err;
889 }
890 }
891 return SIGILL;
892}
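A worked example of the mask/code match (the instruction word is hypothetical):

/* "mult $a0, $a1" assembles to 0x00850018: rs = 4 in bits 25:21,
 * rt = 5 in bits 20:16, function field 0x18. Masking with 0xfc00ffff
 * keeps the opcode and function fields and drops rs/rt, leaving
 * 0x00000018, which matches the { 0xfc00ffff, 0x00000018, mult_func }
 * entry in spec_op_table; mult_func() then decodes rs and rt from the
 * untouched bits of the original word. */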
893
894/**
895 * mipsr2_decoder - Decode and emulate a MIPS R2 instruction
896 * @regs: Process register set
897 * @inst: Instruction to decode and emulate
898 */
899int mipsr2_decoder(struct pt_regs *regs, u32 inst)
900{
901 int err = 0;
902 unsigned long vaddr;
903 u32 nir;
904 unsigned long cpc, epc, nepc, r31, res, rs, rt;
905
906 void __user *fault_addr = NULL;
907 int pass = 0;
908
909repeat:
910 r31 = regs->regs[31];
911 epc = regs->cp0_epc;
912 err = compute_return_epc(regs);
913 if (err < 0) {
914 BUG();
915 return SIGEMT;
916 }
917 pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d)\n",
918 inst, epc, pass);
919
920 switch (MIPSInst_OPCODE(inst)) {
921 case spec_op:
922 err = mipsr2_find_op_func(regs, inst, spec_op_table);
923 if (err < 0) {
924 /* FPU instruction under JR */
925 regs->cp0_cause |= CAUSEF_BD;
926 goto fpu_emul;
927 }
928 break;
929 case spec2_op:
930 err = mipsr2_find_op_func(regs, inst, spec2_op_table);
931 break;
932 case bcond_op:
933 rt = MIPSInst_RT(inst);
934 rs = MIPSInst_RS(inst);
935 switch (rt) {
936 case tgei_op:
937 if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
938 do_trap_or_bp(regs, 0, "TGEI");
939
940 MIPS_R2_STATS(traps);
941
942 break;
943 case tgeiu_op:
944 if (regs->regs[rs] >= MIPSInst_UIMM(inst))
945 do_trap_or_bp(regs, 0, "TGEIU");
946
947 MIPS_R2_STATS(traps);
948
949 break;
950 case tlti_op:
951 if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
952 do_trap_or_bp(regs, 0, "TLTI");
953
954 MIPS_R2_STATS(traps);
955
956 break;
957 case tltiu_op:
958 if (regs->regs[rs] < MIPSInst_UIMM(inst))
959 do_trap_or_bp(regs, 0, "TLTIU");
960
961 MIPS_R2_STATS(traps);
962
963 break;
964 case teqi_op:
965 if (regs->regs[rs] == MIPSInst_SIMM(inst))
966 do_trap_or_bp(regs, 0, "TEQI");
967
968 MIPS_R2_STATS(traps);
969
970 break;
971 case tnei_op:
972 if (regs->regs[rs] != MIPSInst_SIMM(inst))
973 do_trap_or_bp(regs, 0, "TNEI");
974
975 MIPS_R2_STATS(traps);
976
977 break;
978 case bltzl_op:
979 case bgezl_op:
980 case bltzall_op:
981 case bgezall_op:
982 if (delay_slot(regs)) {
983 err = SIGILL;
984 break;
985 }
986 regs->regs[31] = r31;
987 regs->cp0_epc = epc;
988 err = __compute_return_epc(regs);
989 if (err < 0)
990 return SIGEMT;
991 if (err != BRANCH_LIKELY_TAKEN)
992 break;
993 cpc = regs->cp0_epc;
994 nepc = epc + 4;
995 err = __get_user(nir, (u32 __user *)nepc);
996 if (err) {
997 err = SIGSEGV;
998 break;
999 }
1000 /*
1001 * This will probably be optimized away when
1002 * CONFIG_DEBUG_FS is not enabled
1003 */
1004 switch (rt) {
1005 case bltzl_op:
1006 MIPS_R2BR_STATS(bltzl);
1007 break;
1008 case bgezl_op:
1009 MIPS_R2BR_STATS(bgezl);
1010 break;
1011 case bltzall_op:
1012 MIPS_R2BR_STATS(bltzall);
1013 break;
1014 case bgezall_op:
1015 MIPS_R2BR_STATS(bgezall);
1016 break;
1017 }
1018
1019 switch (MIPSInst_OPCODE(nir)) {
1020 case cop1_op:
1021 case cop1x_op:
1022 case lwc1_op:
1023 case swc1_op:
1024 regs->cp0_cause |= CAUSEF_BD;
1025 goto fpu_emul;
1026 }
1027 if (nir) {
1028 err = mipsr6_emul(regs, nir);
1029 if (err > 0) {
1030 err = mips_dsemul(regs, nir, cpc);
1031 if (err == SIGILL)
1032 err = SIGEMT;
1033 MIPS_R2_STATS(dsemul);
1034 }
1035 }
1036 break;
1037 case bltzal_op:
1038 case bgezal_op:
1039 if (delay_slot(regs)) {
1040 err = SIGILL;
1041 break;
1042 }
1043 regs->regs[31] = r31;
1044 regs->cp0_epc = epc;
1045 err = __compute_return_epc(regs);
1046 if (err < 0)
1047 return SIGEMT;
1048 cpc = regs->cp0_epc;
1049 nepc = epc + 4;
1050 err = __get_user(nir, (u32 __user *)nepc);
1051 if (err) {
1052 err = SIGSEGV;
1053 break;
1054 }
1055 /*
1056 * This will probably be optimized away when
1057 * CONFIG_DEBUG_FS is not enabled
1058 */
1059 switch (rt) {
1060 case bltzal_op:
1061 MIPS_R2BR_STATS(bltzal);
1062 break;
1063 case bgezal_op:
1064 MIPS_R2BR_STATS(bgezal);
1065 break;
1066 }
1067
1068 switch (MIPSInst_OPCODE(nir)) {
1069 case cop1_op:
1070 case cop1x_op:
1071 case lwc1_op:
1072 case swc1_op:
1073 regs->cp0_cause |= CAUSEF_BD;
1074 goto fpu_emul;
1075 }
1076 if (nir) {
1077 err = mipsr6_emul(regs, nir);
1078 if (err > 0) {
1079 err = mips_dsemul(regs, nir, cpc);
1080 if (err == SIGILL)
1081 err = SIGEMT;
1082 MIPS_R2_STATS(dsemul);
1083 }
1084 }
1085 break;
1086 default:
1087 regs->regs[31] = r31;
1088 regs->cp0_epc = epc;
1089 err = SIGILL;
1090 break;
1091 }
1092 break;
1093
1094 case beql_op:
1095 case bnel_op:
1096 case blezl_op:
1097 case bgtzl_op:
1098 if (delay_slot(regs)) {
1099 err = SIGILL;
1100 break;
1101 }
1102 regs->regs[31] = r31;
1103 regs->cp0_epc = epc;
1104 err = __compute_return_epc(regs);
1105 if (err < 0)
1106 return SIGEMT;
1107 if (err != BRANCH_LIKELY_TAKEN)
1108 break;
1109 cpc = regs->cp0_epc;
1110 nepc = epc + 4;
1111 err = __get_user(nir, (u32 __user *)nepc);
1112 if (err) {
1113 err = SIGSEGV;
1114 break;
1115 }
1116 /*
1117 * This will probably be optimized away when
1118 * CONFIG_DEBUG_FS is not enabled
1119 */
1120 switch (MIPSInst_OPCODE(inst)) {
1121 case beql_op:
1122 MIPS_R2BR_STATS(beql);
1123 break;
1124 case bnel_op:
1125 MIPS_R2BR_STATS(bnel);
1126 break;
1127 case blezl_op:
1128 MIPS_R2BR_STATS(blezl);
1129 break;
1130 case bgtzl_op:
1131 MIPS_R2BR_STATS(bgtzl);
1132 break;
1133 }
1134
1135 switch (MIPSInst_OPCODE(nir)) {
1136 case cop1_op:
1137 case cop1x_op:
1138 case lwc1_op:
1139 case swc1_op:
1140 regs->cp0_cause |= CAUSEF_BD;
1141 goto fpu_emul;
1142 }
1143 if (nir) {
1144 err = mipsr6_emul(regs, nir);
1145 if (err > 0) {
1146 err = mips_dsemul(regs, nir, cpc);
1147 if (err == SIGILL)
1148 err = SIGEMT;
1149 MIPS_R2_STATS(dsemul);
1150 }
1151 }
1152 break;
1153 case lwc1_op:
1154 case swc1_op:
1155 case cop1_op:
1156 case cop1x_op:
1157fpu_emul:
1158 regs->regs[31] = r31;
1159 regs->cp0_epc = epc;
1160 if (!used_math()) { /* First time FPU user. */
1161 err = init_fpu();
1162 set_used_math();
1163 }
1164 lose_fpu(1); /* Save FPU state for the emulator. */
1165
1166 err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1167 &fault_addr);
1168
1169 /*
1170 * This is a tricky issue - lose_fpu() uses LL/SC atomics if
1171 * the FPU is owned, which effectively cancels a user-level
1172 * LL/SC sequence. So it might seem logical not to restore FPU
1173 * ownership here. But a run of several FPU instructions is far
1174 * more common than an LL-FPU-SC sequence, so we prefer to loop
1175 * here until the next scheduler cycle cancels FPU ownership.
1176 */
1177 own_fpu(1); /* Restore FPU state. */
1178
1179 if (err)
1180 current->thread.cp0_baduaddr = (unsigned long)fault_addr;
1181
1182 MIPS_R2_STATS(fpus);
1183
1184 break;
1185
1186 case lwl_op:
1187 rt = regs->regs[MIPSInst_RT(inst)];
1188 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1189 if (!access_ok(VERIFY_READ, vaddr, 4)) {
1190 current->thread.cp0_baduaddr = vaddr;
1191 err = SIGSEGV;
1192 break;
1193 }
1194 __asm__ __volatile__(
1195 " .set push\n"
1196 " .set reorder\n"
1197#ifdef CONFIG_CPU_LITTLE_ENDIAN
1198 "1:" LB "%1, 0(%2)\n"
1199 INS "%0, %1, 24, 8\n"
1200 " andi %1, %2, 0x3\n"
1201 " beq $0, %1, 9f\n"
1202 ADDIU "%2, %2, -1\n"
1203 "2:" LB "%1, 0(%2)\n"
1204 INS "%0, %1, 16, 8\n"
1205 " andi %1, %2, 0x3\n"
1206 " beq $0, %1, 9f\n"
1207 ADDIU "%2, %2, -1\n"
1208 "3:" LB "%1, 0(%2)\n"
1209 INS "%0, %1, 8, 8\n"
1210 " andi %1, %2, 0x3\n"
1211 " beq $0, %1, 9f\n"
1212 ADDIU "%2, %2, -1\n"
1213 "4:" LB "%1, 0(%2)\n"
1214 INS "%0, %1, 0, 8\n"
1215#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1216 "1:" LB "%1, 0(%2)\n"
1217 INS "%0, %1, 24, 8\n"
1218 ADDIU "%2, %2, 1\n"
1219 " andi %1, %2, 0x3\n"
1220 " beq $0, %1, 9f\n"
1221 "2:" LB "%1, 0(%2)\n"
1222 INS "%0, %1, 16, 8\n"
1223 ADDIU "%2, %2, 1\n"
1224 " andi %1, %2, 0x3\n"
1225 " beq $0, %1, 9f\n"
1226 "3:" LB "%1, 0(%2)\n"
1227 INS "%0, %1, 8, 8\n"
1228 ADDIU "%2, %2, 1\n"
1229 " andi %1, %2, 0x3\n"
1230 " beq $0, %1, 9f\n"
1231 "4:" LB "%1, 0(%2)\n"
1232 INS "%0, %1, 0, 8\n"
1233#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1234 "9: sll %0, %0, 0\n"
1235 "10:\n"
1236 " .insn\n"
1237 " .section .fixup,\"ax\"\n"
1238 "8: li %3,%4\n"
1239 " j 10b\n"
1240 " .previous\n"
1241 " .section __ex_table,\"a\"\n"
1242 " .word 1b,8b\n"
1243 " .word 2b,8b\n"
1244 " .word 3b,8b\n"
1245 " .word 4b,8b\n"
1246 " .previous\n"
1247 " .set pop\n"
1248 : "+&r"(rt), "=&r"(rs),
1249 "+&r"(vaddr), "+&r"(err)
1250 : "i"(SIGSEGV));
1251
1252 if (MIPSInst_RT(inst) && !err)
1253 regs->regs[MIPSInst_RT(inst)] = rt;
1254
1255 MIPS_R2_STATS(loads);
1256
1257 break;
1258
1259 case lwr_op:
1260 rt = regs->regs[MIPSInst_RT(inst)];
1261 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1262 if (!access_ok(VERIFY_READ, vaddr, 4)) {
1263 current->thread.cp0_baduaddr = vaddr;
1264 err = SIGSEGV;
1265 break;
1266 }
1267 __asm__ __volatile__(
1268 " .set push\n"
1269 " .set reorder\n"
1270#ifdef CONFIG_CPU_LITTLE_ENDIAN
1271 "1:" LB "%1, 0(%2)\n"
1272 INS "%0, %1, 0, 8\n"
1273 ADDIU "%2, %2, 1\n"
1274 " andi %1, %2, 0x3\n"
1275 " beq $0, %1, 9f\n"
1276 "2:" LB "%1, 0(%2)\n"
1277 INS "%0, %1, 8, 8\n"
1278 ADDIU "%2, %2, 1\n"
1279 " andi %1, %2, 0x3\n"
1280 " beq $0, %1, 9f\n"
1281 "3:" LB "%1, 0(%2)\n"
1282 INS "%0, %1, 16, 8\n"
1283 ADDIU "%2, %2, 1\n"
1284 " andi %1, %2, 0x3\n"
1285 " beq $0, %1, 9f\n"
1286 "4:" LB "%1, 0(%2)\n"
1287 INS "%0, %1, 24, 8\n"
1288 " sll %0, %0, 0\n"
1289#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1290 "1:" LB "%1, 0(%2)\n"
1291 INS "%0, %1, 0, 8\n"
1292 " andi %1, %2, 0x3\n"
1293 " beq $0, %1, 9f\n"
1294 ADDIU "%2, %2, -1\n"
1295 "2:" LB "%1, 0(%2)\n"
1296 INS "%0, %1, 8, 8\n"
1297 " andi %1, %2, 0x3\n"
1298 " beq $0, %1, 9f\n"
1299 ADDIU "%2, %2, -1\n"
1300 "3:" LB "%1, 0(%2)\n"
1301 INS "%0, %1, 16, 8\n"
1302 " andi %1, %2, 0x3\n"
1303 " beq $0, %1, 9f\n"
1304 ADDIU "%2, %2, -1\n"
1305 "4:" LB "%1, 0(%2)\n"
1306 INS "%0, %1, 24, 8\n"
1307 " sll %0, %0, 0\n"
1308#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1309 "9:\n"
1310 "10:\n"
1311 " .insn\n"
1312 " .section .fixup,\"ax\"\n"
1313 "8: li %3,%4\n"
1314 " j 10b\n"
1315 " .previous\n"
1316 " .section __ex_table,\"a\"\n"
1317 " .word 1b,8b\n"
1318 " .word 2b,8b\n"
1319 " .word 3b,8b\n"
1320 " .word 4b,8b\n"
1321 " .previous\n"
1322 " .set pop\n"
1323 : "+&r"(rt), "=&r"(rs),
1324 "+&r"(vaddr), "+&r"(err)
1325 : "i"(SIGSEGV));
1326 if (MIPSInst_RT(inst) && !err)
1327 regs->regs[MIPSInst_RT(inst)] = rt;
1328
1329 MIPS_R2_STATS(loads);
1330
1331 break;
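	/*
	 * Context for the two cases above (illustrative): little-endian
	 * userland loads a word from an unaligned address a with the
	 * pair "lwr rt, 0(a)" + "lwl rt, 3(a)". LWR fills the low bytes
	 * of rt up to the next word boundary, and LWL fills the
	 * remaining high bytes downward from a+3 - exactly the
	 * byte-insert loops emulated by the inline assembly.
	 */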
1332
1333 case swl_op:
1334 rt = regs->regs[MIPSInst_RT(inst)];
1335 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1336 if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
1337 current->thread.cp0_baduaddr = vaddr;
1338 err = SIGSEGV;
1339 break;
1340 }
1341 __asm__ __volatile__(
1342 " .set push\n"
1343 " .set reorder\n"
1344#ifdef CONFIG_CPU_LITTLE_ENDIAN
1345 EXT "%1, %0, 24, 8\n"
1346 "1:" SB "%1, 0(%2)\n"
1347 " andi %1, %2, 0x3\n"
1348 " beq $0, %1, 9f\n"
1349 ADDIU "%2, %2, -1\n"
1350 EXT "%1, %0, 16, 8\n"
1351 "2:" SB "%1, 0(%2)\n"
1352 " andi %1, %2, 0x3\n"
1353 " beq $0, %1, 9f\n"
1354 ADDIU "%2, %2, -1\n"
1355 EXT "%1, %0, 8, 8\n"
1356 "3:" SB "%1, 0(%2)\n"
1357 " andi %1, %2, 0x3\n"
1358 " beq $0, %1, 9f\n"
1359 ADDIU "%2, %2, -1\n"
1360 EXT "%1, %0, 0, 8\n"
1361 "4:" SB "%1, 0(%2)\n"
1362#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1363 EXT "%1, %0, 24, 8\n"
1364 "1:" SB "%1, 0(%2)\n"
1365 ADDIU "%2, %2, 1\n"
1366 " andi %1, %2, 0x3\n"
1367 " beq $0, %1, 9f\n"
1368 EXT "%1, %0, 16, 8\n"
1369 "2:" SB "%1, 0(%2)\n"
1370 ADDIU "%2, %2, 1\n"
1371 " andi %1, %2, 0x3\n"
1372 " beq $0, %1, 9f\n"
1373 EXT "%1, %0, 8, 8\n"
1374 "3:" SB "%1, 0(%2)\n"
1375 ADDIU "%2, %2, 1\n"
1376 " andi %1, %2, 0x3\n"
1377 " beq $0, %1, 9f\n"
1378 EXT "%1, %0, 0, 8\n"
1379 "4:" SB "%1, 0(%2)\n"
1380#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1381 "9:\n"
1382 " .insn\n"
1383 " .section .fixup,\"ax\"\n"
1384 "8: li %3,%4\n"
1385 " j 9b\n"
1386 " .previous\n"
1387 " .section __ex_table,\"a\"\n"
1388 " .word 1b,8b\n"
1389 " .word 2b,8b\n"
1390 " .word 3b,8b\n"
1391 " .word 4b,8b\n"
1392 " .previous\n"
1393 " .set pop\n"
1394 : "+&r"(rt), "=&r"(rs),
1395 "+&r"(vaddr), "+&r"(err)
1396 : "i"(SIGSEGV)
1397 : "memory");
1398
1399 MIPS_R2_STATS(stores);
1400
1401 break;
1402
1403 case swr_op:
1404 rt = regs->regs[MIPSInst_RT(inst)];
1405 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1406 if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
1407 current->thread.cp0_baduaddr = vaddr;
1408 err = SIGSEGV;
1409 break;
1410 }
1411 __asm__ __volatile__(
1412 " .set push\n"
1413 " .set reorder\n"
1414#ifdef CONFIG_CPU_LITTLE_ENDIAN
1415 EXT "%1, %0, 0, 8\n"
1416 "1:" SB "%1, 0(%2)\n"
1417 ADDIU "%2, %2, 1\n"
1418 " andi %1, %2, 0x3\n"
1419 " beq $0, %1, 9f\n"
1420 EXT "%1, %0, 8, 8\n"
1421 "2:" SB "%1, 0(%2)\n"
1422 ADDIU "%2, %2, 1\n"
1423 " andi %1, %2, 0x3\n"
1424 " beq $0, %1, 9f\n"
1425 EXT "%1, %0, 16, 8\n"
1426 "3:" SB "%1, 0(%2)\n"
1427 ADDIU "%2, %2, 1\n"
1428 " andi %1, %2, 0x3\n"
1429 " beq $0, %1, 9f\n"
1430 EXT "%1, %0, 24, 8\n"
1431 "4:" SB "%1, 0(%2)\n"
1432#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1433 EXT "%1, %0, 0, 8\n"
1434 "1:" SB "%1, 0(%2)\n"
1435 " andi %1, %2, 0x3\n"
1436 " beq $0, %1, 9f\n"
1437 ADDIU "%2, %2, -1\n"
1438 EXT "%1, %0, 8, 8\n"
1439 "2:" SB "%1, 0(%2)\n"
1440 " andi %1, %2, 0x3\n"
1441 " beq $0, %1, 9f\n"
1442 ADDIU "%2, %2, -1\n"
1443 EXT "%1, %0, 16, 8\n"
1444 "3:" SB "%1, 0(%2)\n"
1445 " andi %1, %2, 0x3\n"
1446 " beq $0, %1, 9f\n"
1447 ADDIU "%2, %2, -1\n"
1448 EXT "%1, %0, 24, 8\n"
1449 "4:" SB "%1, 0(%2)\n"
1450#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1451 "9:\n"
1452 " .insn\n"
1453 " .section .fixup,\"ax\"\n"
1454 "8: li %3,%4\n"
1455 " j 9b\n"
1456 " .previous\n"
1457 " .section __ex_table,\"a\"\n"
1458 " .word 1b,8b\n"
1459 " .word 2b,8b\n"
1460 " .word 3b,8b\n"
1461 " .word 4b,8b\n"
1462 " .previous\n"
1463 " .set pop\n"
1464 : "+&r"(rt), "=&r"(rs),
1465 "+&r"(vaddr), "+&r"(err)
1466 : "i"(SIGSEGV)
1467 : "memory");
1468
1469 MIPS_R2_STATS(stores);
1470
1471 break;
1472
1473 case ldl_op:
1474 if (config_enabled(CONFIG_32BIT)) {
1475 err = SIGILL;
1476 break;
1477 }
1478
1479 rt = regs->regs[MIPSInst_RT(inst)];
1480 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1481 if (!access_ok(VERIFY_READ, vaddr, 8)) {
1482 current->thread.cp0_baduaddr = vaddr;
1483 err = SIGSEGV;
1484 break;
1485 }
1486 __asm__ __volatile__(
1487 " .set push\n"
1488 " .set reorder\n"
1489#ifdef CONFIG_CPU_LITTLE_ENDIAN
1490 "1: lb %1, 0(%2)\n"
1491 " dinsu %0, %1, 56, 8\n"
1492 " andi %1, %2, 0x7\n"
1493 " beq $0, %1, 9f\n"
1494 " daddiu %2, %2, -1\n"
1495 "2: lb %1, 0(%2)\n"
1496 " dinsu %0, %1, 48, 8\n"
1497 " andi %1, %2, 0x7\n"
1498 " beq $0, %1, 9f\n"
1499 " daddiu %2, %2, -1\n"
1500 "3: lb %1, 0(%2)\n"
1501 " dinsu %0, %1, 40, 8\n"
1502 " andi %1, %2, 0x7\n"
1503 " beq $0, %1, 9f\n"
1504 " daddiu %2, %2, -1\n"
1505 "4: lb %1, 0(%2)\n"
1506 " dinsu %0, %1, 32, 8\n"
1507 " andi %1, %2, 0x7\n"
1508 " beq $0, %1, 9f\n"
1509 " daddiu %2, %2, -1\n"
1510 "5: lb %1, 0(%2)\n"
1511 " dins %0, %1, 24, 8\n"
1512 " andi %1, %2, 0x7\n"
1513 " beq $0, %1, 9f\n"
1514 " daddiu %2, %2, -1\n"
1515 "6: lb %1, 0(%2)\n"
1516 " dins %0, %1, 16, 8\n"
1517 " andi %1, %2, 0x7\n"
1518 " beq $0, %1, 9f\n"
1519 " daddiu %2, %2, -1\n"
1520 "7: lb %1, 0(%2)\n"
1521 " dins %0, %1, 8, 8\n"
1522 " andi %1, %2, 0x7\n"
1523 " beq $0, %1, 9f\n"
1524 " daddiu %2, %2, -1\n"
1525 "0: lb %1, 0(%2)\n"
1526 " dins %0, %1, 0, 8\n"
1527#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1528 "1: lb %1, 0(%2)\n"
1529 " dinsu %0, %1, 56, 8\n"
1530 " daddiu %2, %2, 1\n"
1531 " andi %1, %2, 0x7\n"
1532 " beq $0, %1, 9f\n"
1533 "2: lb %1, 0(%2)\n"
1534 " dinsu %0, %1, 48, 8\n"
1535 " daddiu %2, %2, 1\n"
1536 " andi %1, %2, 0x7\n"
1537 " beq $0, %1, 9f\n"
1538 "3: lb %1, 0(%2)\n"
1539 " dinsu %0, %1, 40, 8\n"
1540 " daddiu %2, %2, 1\n"
1541 " andi %1, %2, 0x7\n"
1542 " beq $0, %1, 9f\n"
1543 "4: lb %1, 0(%2)\n"
1544 " dinsu %0, %1, 32, 8\n"
1545 " daddiu %2, %2, 1\n"
1546 " andi %1, %2, 0x7\n"
1547 " beq $0, %1, 9f\n"
1548 "5: lb %1, 0(%2)\n"
1549 " dins %0, %1, 24, 8\n"
1550 " daddiu %2, %2, 1\n"
1551 " andi %1, %2, 0x7\n"
1552 " beq $0, %1, 9f\n"
1553 "6: lb %1, 0(%2)\n"
1554 " dins %0, %1, 16, 8\n"
1555 " daddiu %2, %2, 1\n"
1556 " andi %1, %2, 0x7\n"
1557 " beq $0, %1, 9f\n"
1558 "7: lb %1, 0(%2)\n"
1559 " dins %0, %1, 8, 8\n"
1560 " daddiu %2, %2, 1\n"
1561 " andi %1, %2, 0x7\n"
1562 " beq $0, %1, 9f\n"
1563 "0: lb %1, 0(%2)\n"
1564 " dins %0, %1, 0, 8\n"
1565#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1566 "9:\n"
1567 " .insn\n"
1568 " .section .fixup,\"ax\"\n"
1569 "8: li %3,%4\n"
1570 " j 9b\n"
1571 " .previous\n"
1572 " .section __ex_table,\"a\"\n"
1573 " .word 1b,8b\n"
1574 " .word 2b,8b\n"
1575 " .word 3b,8b\n"
1576 " .word 4b,8b\n"
1577 " .word 5b,8b\n"
1578 " .word 6b,8b\n"
1579 " .word 7b,8b\n"
1580 " .word 0b,8b\n"
1581 " .previous\n"
1582 " .set pop\n"
1583 : "+&r"(rt), "=&r"(rs),
1584 "+&r"(vaddr), "+&r"(err)
1585 : "i"(SIGSEGV));
1586 if (MIPSInst_RT(inst) && !err)
1587 regs->regs[MIPSInst_RT(inst)] = rt;
1588
1589 MIPS_R2_STATS(loads);
1590 break;
1591
1592 case ldr_op:
1593 if (config_enabled(CONFIG_32BIT)) {
1594 err = SIGILL;
1595 break;
1596 }
1597
1598 rt = regs->regs[MIPSInst_RT(inst)];
1599 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1600 if (!access_ok(VERIFY_READ, vaddr, 8)) {
1601 current->thread.cp0_baduaddr = vaddr;
1602 err = SIGSEGV;
1603 break;
1604 }
1605 __asm__ __volatile__(
1606 " .set push\n"
1607 " .set reorder\n"
1608#ifdef CONFIG_CPU_LITTLE_ENDIAN
1609 "1: lb %1, 0(%2)\n"
1610 " dins %0, %1, 0, 8\n"
1611 " daddiu %2, %2, 1\n"
1612 " andi %1, %2, 0x7\n"
1613 " beq $0, %1, 9f\n"
1614 "2: lb %1, 0(%2)\n"
1615 " dins %0, %1, 8, 8\n"
1616 " daddiu %2, %2, 1\n"
1617 " andi %1, %2, 0x7\n"
1618 " beq $0, %1, 9f\n"
1619 "3: lb %1, 0(%2)\n"
1620 " dins %0, %1, 16, 8\n"
1621 " daddiu %2, %2, 1\n"
1622 " andi %1, %2, 0x7\n"
1623 " beq $0, %1, 9f\n"
1624 "4: lb %1, 0(%2)\n"
1625 " dins %0, %1, 24, 8\n"
1626 " daddiu %2, %2, 1\n"
1627 " andi %1, %2, 0x7\n"
1628 " beq $0, %1, 9f\n"
1629 "5: lb %1, 0(%2)\n"
1630 " dinsu %0, %1, 32, 8\n"
1631 " daddiu %2, %2, 1\n"
1632 " andi %1, %2, 0x7\n"
1633 " beq $0, %1, 9f\n"
1634 "6: lb %1, 0(%2)\n"
1635 " dinsu %0, %1, 40, 8\n"
1636 " daddiu %2, %2, 1\n"
1637 " andi %1, %2, 0x7\n"
1638 " beq $0, %1, 9f\n"
1639 "7: lb %1, 0(%2)\n"
1640 " dinsu %0, %1, 48, 8\n"
1641 " daddiu %2, %2, 1\n"
1642 " andi %1, %2, 0x7\n"
1643 " beq $0, %1, 9f\n"
1644 "0: lb %1, 0(%2)\n"
1645 " dinsu %0, %1, 56, 8\n"
1646#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1647 "1: lb %1, 0(%2)\n"
1648 " dins %0, %1, 0, 8\n"
1649 " andi %1, %2, 0x7\n"
1650 " beq $0, %1, 9f\n"
1651 " daddiu %2, %2, -1\n"
1652 "2: lb %1, 0(%2)\n"
1653 " dins %0, %1, 8, 8\n"
1654 " andi %1, %2, 0x7\n"
1655 " beq $0, %1, 9f\n"
1656 " daddiu %2, %2, -1\n"
1657 "3: lb %1, 0(%2)\n"
1658 " dins %0, %1, 16, 8\n"
1659 " andi %1, %2, 0x7\n"
1660 " beq $0, %1, 9f\n"
1661 " daddiu %2, %2, -1\n"
1662 "4: lb %1, 0(%2)\n"
1663 " dins %0, %1, 24, 8\n"
1664 " andi %1, %2, 0x7\n"
1665 " beq $0, %1, 9f\n"
1666 " daddiu %2, %2, -1\n"
1667 "5: lb %1, 0(%2)\n"
1668 " dinsu %0, %1, 32, 8\n"
1669 " andi %1, %2, 0x7\n"
1670 " beq $0, %1, 9f\n"
1671 " daddiu %2, %2, -1\n"
1672 "6: lb %1, 0(%2)\n"
1673 " dinsu %0, %1, 40, 8\n"
1674 " andi %1, %2, 0x7\n"
1675 " beq $0, %1, 9f\n"
1676 " daddiu %2, %2, -1\n"
1677 "7: lb %1, 0(%2)\n"
1678 " dinsu %0, %1, 48, 8\n"
1679 " andi %1, %2, 0x7\n"
1680 " beq $0, %1, 9f\n"
1681 " daddiu %2, %2, -1\n"
1682 "0: lb %1, 0(%2)\n"
1683 " dinsu %0, %1, 56, 8\n"
1684#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1685 "9:\n"
1686 " .insn\n"
1687 " .section .fixup,\"ax\"\n"
1688 "8: li %3,%4\n"
1689 " j 9b\n"
1690 " .previous\n"
1691 " .section __ex_table,\"a\"\n"
1692 " .word 1b,8b\n"
1693 " .word 2b,8b\n"
1694 " .word 3b,8b\n"
1695 " .word 4b,8b\n"
1696 " .word 5b,8b\n"
1697 " .word 6b,8b\n"
1698 " .word 7b,8b\n"
1699 " .word 0b,8b\n"
1700 " .previous\n"
1701 " .set pop\n"
1702 : "+&r"(rt), "=&r"(rs),
1703 "+&r"(vaddr), "+&r"(err)
1704 : "i"(SIGSEGV));
1705 if (MIPSInst_RT(inst) && !err)
1706 regs->regs[MIPSInst_RT(inst)] = rt;
1707
1708 MIPS_R2_STATS(loads);
1709 break;
1710
1711 case sdl_op:
1712 if (config_enabled(CONFIG_32BIT)) {
1713 err = SIGILL;
1714 break;
1715 }
1716
1717 rt = regs->regs[MIPSInst_RT(inst)];
1718 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1719 if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
1720 current->thread.cp0_baduaddr = vaddr;
1721 err = SIGSEGV;
1722 break;
1723 }
1724 __asm__ __volatile__(
1725 " .set push\n"
1726 " .set reorder\n"
1727#ifdef CONFIG_CPU_LITTLE_ENDIAN
1728 " dextu %1, %0, 56, 8\n"
1729 "1: sb %1, 0(%2)\n"
1730 " andi %1, %2, 0x7\n"
1731 " beq $0, %1, 9f\n"
1732 " daddiu %2, %2, -1\n"
1733 " dextu %1, %0, 48, 8\n"
1734 "2: sb %1, 0(%2)\n"
1735 " andi %1, %2, 0x7\n"
1736 " beq $0, %1, 9f\n"
1737 " daddiu %2, %2, -1\n"
1738 " dextu %1, %0, 40, 8\n"
1739 "3: sb %1, 0(%2)\n"
1740 " andi %1, %2, 0x7\n"
1741 " beq $0, %1, 9f\n"
1742 " daddiu %2, %2, -1\n"
1743 " dextu %1, %0, 32, 8\n"
1744 "4: sb %1, 0(%2)\n"
1745 " andi %1, %2, 0x7\n"
1746 " beq $0, %1, 9f\n"
1747 " daddiu %2, %2, -1\n"
1748 " dext %1, %0, 24, 8\n"
1749 "5: sb %1, 0(%2)\n"
1750 " andi %1, %2, 0x7\n"
1751 " beq $0, %1, 9f\n"
1752 " daddiu %2, %2, -1\n"
1753 " dext %1, %0, 16, 8\n"
1754 "6: sb %1, 0(%2)\n"
1755 " andi %1, %2, 0x7\n"
1756 " beq $0, %1, 9f\n"
1757 " daddiu %2, %2, -1\n"
1758 " dext %1, %0, 8, 8\n"
1759 "7: sb %1, 0(%2)\n"
1760 " andi %1, %2, 0x7\n"
1761 " beq $0, %1, 9f\n"
1762 " daddiu %2, %2, -1\n"
1763 " dext %1, %0, 0, 8\n"
1764 "0: sb %1, 0(%2)\n"
1765#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1766 " dextu %1, %0, 56, 8\n"
1767 "1: sb %1, 0(%2)\n"
1768 " daddiu %2, %2, 1\n"
1769 " andi %1, %2, 0x7\n"
1770 " beq $0, %1, 9f\n"
1771 " dextu %1, %0, 48, 8\n"
1772 "2: sb %1, 0(%2)\n"
1773 " daddiu %2, %2, 1\n"
1774 " andi %1, %2, 0x7\n"
1775 " beq $0, %1, 9f\n"
1776 " dextu %1, %0, 40, 8\n"
1777 "3: sb %1, 0(%2)\n"
1778 " daddiu %2, %2, 1\n"
1779 " andi %1, %2, 0x7\n"
1780 " beq $0, %1, 9f\n"
1781 " dextu %1, %0, 32, 8\n"
1782 "4: sb %1, 0(%2)\n"
1783 " daddiu %2, %2, 1\n"
1784 " andi %1, %2, 0x7\n"
1785 " beq $0, %1, 9f\n"
1786 " dext %1, %0, 24, 8\n"
1787 "5: sb %1, 0(%2)\n"
1788 " daddiu %2, %2, 1\n"
1789 " andi %1, %2, 0x7\n"
1790 " beq $0, %1, 9f\n"
1791 " dext %1, %0, 16, 8\n"
1792 "6: sb %1, 0(%2)\n"
1793 " daddiu %2, %2, 1\n"
1794 " andi %1, %2, 0x7\n"
1795 " beq $0, %1, 9f\n"
1796 " dext %1, %0, 8, 8\n"
1797 "7: sb %1, 0(%2)\n"
1798 " daddiu %2, %2, 1\n"
1799 " andi %1, %2, 0x7\n"
1800 " beq $0, %1, 9f\n"
1801 " dext %1, %0, 0, 8\n"
1802 "0: sb %1, 0(%2)\n"
1803#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1804 "9:\n"
1805 " .insn\n"
1806 " .section .fixup,\"ax\"\n"
1807 "8: li %3,%4\n"
1808 " j 9b\n"
1809 " .previous\n"
1810 " .section __ex_table,\"a\"\n"
1811 " .word 1b,8b\n"
1812 " .word 2b,8b\n"
1813 " .word 3b,8b\n"
1814 " .word 4b,8b\n"
1815 " .word 5b,8b\n"
1816 " .word 6b,8b\n"
1817 " .word 7b,8b\n"
1818 " .word 0b,8b\n"
1819 " .previous\n"
1820 " .set pop\n"
1821 : "+&r"(rt), "=&r"(rs),
1822 "+&r"(vaddr), "+&r"(err)
1823 : "i"(SIGSEGV)
1824 : "memory");
1825
1826 MIPS_R2_STATS(stores);
1827 break;
1828
1829 case sdr_op:
1830 if (config_enabled(CONFIG_32BIT)) {
1831 err = SIGILL;
1832 break;
1833 }
1834
1835 rt = regs->regs[MIPSInst_RT(inst)];
1836 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1837 if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
1838 current->thread.cp0_baduaddr = vaddr;
1839 err = SIGSEGV;
1840 break;
1841 }
1842 __asm__ __volatile__(
1843 " .set push\n"
1844 " .set reorder\n"
1845#ifdef CONFIG_CPU_LITTLE_ENDIAN
1846 " dext %1, %0, 0, 8\n"
1847 "1: sb %1, 0(%2)\n"
1848 " daddiu %2, %2, 1\n"
1849 " andi %1, %2, 0x7\n"
1850 " beq $0, %1, 9f\n"
1851 " dext %1, %0, 8, 8\n"
1852 "2: sb %1, 0(%2)\n"
1853 " daddiu %2, %2, 1\n"
1854 " andi %1, %2, 0x7\n"
1855 " beq $0, %1, 9f\n"
1856 " dext %1, %0, 16, 8\n"
1857 "3: sb %1, 0(%2)\n"
1858 " daddiu %2, %2, 1\n"
1859 " andi %1, %2, 0x7\n"
1860 " beq $0, %1, 9f\n"
1861 " dext %1, %0, 24, 8\n"
1862 "4: sb %1, 0(%2)\n"
1863 " daddiu %2, %2, 1\n"
1864 " andi %1, %2, 0x7\n"
1865 " beq $0, %1, 9f\n"
1866 " dextu %1, %0, 32, 8\n"
1867 "5: sb %1, 0(%2)\n"
1868 " daddiu %2, %2, 1\n"
1869 " andi %1, %2, 0x7\n"
1870 " beq $0, %1, 9f\n"
1871 " dextu %1, %0, 40, 8\n"
1872 "6: sb %1, 0(%2)\n"
1873 " daddiu %2, %2, 1\n"
1874 " andi %1, %2, 0x7\n"
1875 " beq $0, %1, 9f\n"
1876 " dextu %1, %0, 48, 8\n"
1877 "7: sb %1, 0(%2)\n"
1878 " daddiu %2, %2, 1\n"
1879 " andi %1, %2, 0x7\n"
1880 " beq $0, %1, 9f\n"
1881 " dextu %1, %0, 56, 8\n"
1882 "0: sb %1, 0(%2)\n"
1883#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1884 " dext %1, %0, 0, 8\n"
1885 "1: sb %1, 0(%2)\n"
1886 " andi %1, %2, 0x7\n"
1887 " beq $0, %1, 9f\n"
1888 " daddiu %2, %2, -1\n"
1889 " dext %1, %0, 8, 8\n"
1890 "2: sb %1, 0(%2)\n"
1891 " andi %1, %2, 0x7\n"
1892 " beq $0, %1, 9f\n"
1893 " daddiu %2, %2, -1\n"
1894 " dext %1, %0, 16, 8\n"
1895 "3: sb %1, 0(%2)\n"
1896 " andi %1, %2, 0x7\n"
1897 " beq $0, %1, 9f\n"
1898 " daddiu %2, %2, -1\n"
1899 " dext %1, %0, 24, 8\n"
1900 "4: sb %1, 0(%2)\n"
1901 " andi %1, %2, 0x7\n"
1902 " beq $0, %1, 9f\n"
1903 " daddiu %2, %2, -1\n"
1904 " dextu %1, %0, 32, 8\n"
1905 "5: sb %1, 0(%2)\n"
1906 " andi %1, %2, 0x7\n"
1907 " beq $0, %1, 9f\n"
1908 " daddiu %2, %2, -1\n"
1909 " dextu %1, %0, 40, 8\n"
1910 "6: sb %1, 0(%2)\n"
1911 " andi %1, %2, 0x7\n"
1912 " beq $0, %1, 9f\n"
1913 " daddiu %2, %2, -1\n"
1914 " dextu %1, %0, 48, 8\n"
1915 "7: sb %1, 0(%2)\n"
1916 " andi %1, %2, 0x7\n"
1917 " beq $0, %1, 9f\n"
1918 " daddiu %2, %2, -1\n"
1919 " dextu %1, %0, 56, 8\n"
1920 "0: sb %1, 0(%2)\n"
1921#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1922 "9:\n"
1923 " .insn\n"
1924 " .section .fixup,\"ax\"\n"
1925 "8: li %3,%4\n"
1926 " j 9b\n"
1927 " .previous\n"
1928 " .section __ex_table,\"a\"\n"
1929 " .word 1b,8b\n"
1930 " .word 2b,8b\n"
1931 " .word 3b,8b\n"
1932 " .word 4b,8b\n"
1933 " .word 5b,8b\n"
1934 " .word 6b,8b\n"
1935 " .word 7b,8b\n"
1936 " .word 0b,8b\n"
1937 " .previous\n"
1938 " .set pop\n"
1939 : "+&r"(rt), "=&r"(rs),
1940 "+&r"(vaddr), "+&r"(err)
1941 : "i"(SIGSEGV)
1942 : "memory");
1943
1944 MIPS_R2_STATS(stores);
1945
1946 break;
1947 case ll_op:
1948 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1949 if (vaddr & 0x3) {
1950 current->thread.cp0_baduaddr = vaddr;
1951 err = SIGBUS;
1952 break;
1953 }
1954 if (!access_ok(VERIFY_READ, vaddr, 4)) {
1955 current->thread.cp0_baduaddr = vaddr;
1956 err = SIGBUS;
1957 break;
1958 }
1959
1960 if (!cpu_has_rw_llb) {
1961 /*
1962 * An LL/SC block can't be safely emulated without
1963 * Config5/LLB being available. So it's probably time to
1964 * kill our process before things get any worse. This is
1965 * because Config5/LLB allows us to use ERETNC so that
1966 * the LLAddr/LLB bit is not cleared when we return from
1967 * an exception. MIPS R2 LL/SC instructions trap with an
1968 * RI exception, so once we emulate them here we can return
1969 * to userland with ERETNC. That preserves the
1970 * LLAddr/LLB bit so the subsequent SC instruction will
1971 * succeed, preserving the atomic semantics of the LL/SC
1972 * block. Without that, there is no safe way to emulate
1973 * an LL/SC block in MIPSR2 userland.
1974 */
1975 pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
1976 err = SIGKILL;
1977 break;
1978 }
1979
1980 __asm__ __volatile__(
1981 "1:\n"
1982 "ll %0, 0(%2)\n"
1983 "2:\n"
1984 ".insn\n"
1985 ".section .fixup,\"ax\"\n"
1986 "3:\n"
1987 "li %1, %3\n"
1988 "j 2b\n"
1989 ".previous\n"
1990 ".section __ex_table,\"a\"\n"
1991 ".word 1b, 3b\n"
1992 ".previous\n"
1993 : "=&r"(res), "+&r"(err)
1994 : "r"(vaddr), "i"(SIGSEGV)
1995 : "memory");
1996
1997 if (MIPSInst_RT(inst) && !err)
1998 regs->regs[MIPSInst_RT(inst)] = res;
1999 MIPS_R2_STATS(llsc);
2000
2001 break;
2002
2003 case sc_op:
2004 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
2005 if (vaddr & 0x3) {
2006 current->thread.cp0_baduaddr = vaddr;
2007 err = SIGBUS;
2008 break;
2009 }
2010 if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
2011 current->thread.cp0_baduaddr = vaddr;
2012 err = SIGBUS;
2013 break;
2014 }
2015
2016 if (!cpu_has_rw_llb) {
2017 /*
2018 * An LL/SC block can't be safely emulated without
2019 * Config5/LLB being available. So it's probably time to
2020 * kill our process before things get any worse. This is
2021 * because Config5/LLB allows us to use ERETNC so that
2022 * the LLAddr/LLB bit is not cleared when we return from
2023 * an exception. MIPS R2 LL/SC instructions trap with an
2024 * RI exception, so once we emulate them here we can return
2025 * to userland with ERETNC. That preserves the
2026 * LLAddr/LLB bit so the subsequent SC instruction will
2027 * succeed, preserving the atomic semantics of the LL/SC
2028 * block. Without that, there is no safe way to emulate
2029 * an LL/SC block in MIPSR2 userland.
2030 */
2031 pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
2032 err = SIGKILL;
2033 break;
2034 }
2035
2036 res = regs->regs[MIPSInst_RT(inst)];
2037
2038 __asm__ __volatile__(
2039 "1:\n"
2040 "sc %0, 0(%2)\n"
2041 "2:\n"
2042 ".insn\n"
2043 ".section .fixup,\"ax\"\n"
2044 "3:\n"
2045 "li %1, %3\n"
2046 "j 2b\n"
2047 ".previous\n"
2048 ".section __ex_table,\"a\"\n"
2049 ".word 1b, 3b\n"
2050 ".previous\n"
2051 : "+&r"(res), "+&r"(err)
2052 : "r"(vaddr), "i"(SIGSEGV));
2053
2054 if (MIPSInst_RT(inst) && !err)
2055 regs->regs[MIPSInst_RT(inst)] = res;
2056
2057 MIPS_R2_STATS(llsc);
2058
2059 break;
2060
2061 case lld_op:
2062 if (config_enabled(CONFIG_32BIT)) {
2063 err = SIGILL;
2064 break;
2065 }
2066
2067 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
2068 if (vaddr & 0x7) {
2069 current->thread.cp0_baduaddr = vaddr;
2070 err = SIGBUS;
2071 break;
2072 }
2073 if (!access_ok(VERIFY_READ, vaddr, 8)) {
2074 current->thread.cp0_baduaddr = vaddr;
2075 err = SIGBUS;
2076 break;
2077 }
2078
2079 if (!cpu_has_rw_llb) {
2080 /*
2081 * An LL/SC block can't be safely emulated without
2082 * Config5/LLB being available. So it's probably time to
2083 * kill our process before things get any worse. This is
2084 * because Config5/LLB allows us to use ERETNC so that
2085 * the LLAddr/LLB bit is not cleared when we return from
2086 * an exception. MIPS R2 LL/SC instructions trap with an
2087 * RI exception, so once we emulate them here we can return
2088 * to userland with ERETNC. That preserves the
2089 * LLAddr/LLB bit so the subsequent SC instruction will
2090 * succeed, preserving the atomic semantics of the LL/SC
2091 * block. Without that, there is no safe way to emulate
2092 * an LL/SC block in MIPSR2 userland.
2093 */
2094 pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
2095 err = SIGKILL;
2096 break;
2097 }
2098
2099 __asm__ __volatile__(
2100 "1:\n"
2101 "lld %0, 0(%2)\n"
2102 "2:\n"
2103 ".insn\n"
2104 ".section .fixup,\"ax\"\n"
2105 "3:\n"
2106 "li %1, %3\n"
2107 "j 2b\n"
2108 ".previous\n"
2109 ".section __ex_table,\"a\"\n"
2110 ".word 1b, 3b\n"
2111 ".previous\n"
2112 : "=&r"(res), "+&r"(err)
2113 : "r"(vaddr), "i"(SIGSEGV)
2114 : "memory");
2115 if (MIPSInst_RT(inst) && !err)
2116 regs->regs[MIPSInst_RT(inst)] = res;
2117
2118 MIPS_R2_STATS(llsc);
2119
2120 break;
2121
2122 case scd_op:
2123 if (config_enabled(CONFIG_32BIT)) {
2124 err = SIGILL;
2125 break;
2126 }
2127
2128 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
2129 if (vaddr & 0x7) {
2130 current->thread.cp0_baduaddr = vaddr;
2131 err = SIGBUS;
2132 break;
2133 }
2134 if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
2135 current->thread.cp0_baduaddr = vaddr;
2136 err = SIGBUS;
2137 break;
2138 }
2139
2140 if (!cpu_has_rw_llb) {
2141 /*
2142 * An LL/SC block can't be safely emulated without
2143 * Config5/LLB being available. So it's probably time to
2144 * kill our process before things get any worse. This is
2145 * because Config5/LLB allows us to use ERETNC so that
2146 * the LLAddr/LLB bit is not cleared when we return from
2147 * an exception. MIPS R2 LL/SC instructions trap with an
2148 * RI exception, so once we emulate them here we can return
2149 * to userland with ERETNC. That preserves the
2150 * LLAddr/LLB bit so the subsequent SC instruction will
2151 * succeed, preserving the atomic semantics of the LL/SC
2152 * block. Without that, there is no safe way to emulate
2153 * an LL/SC block in MIPSR2 userland.
2154 */
2155 pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
2156 err = SIGKILL;
2157 break;
2158 }
2159
2160 res = regs->regs[MIPSInst_RT(inst)];
2161
2162 __asm__ __volatile__(
2163 "1:\n"
2164 "scd %0, 0(%2)\n"
2165 "2:\n"
2166 ".insn\n"
2167 ".section .fixup,\"ax\"\n"
2168 "3:\n"
2169 "li %1, %3\n"
2170 "j 2b\n"
2171 ".previous\n"
2172 ".section __ex_table,\"a\"\n"
2173 ".word 1b, 3b\n"
2174 ".previous\n"
2175 : "+&r"(res), "+&r"(err)
2176 : "r"(vaddr), "i"(SIGSEGV));
2177
2178 if (MIPSInst_RT(inst) && !err)
2179 regs->regs[MIPSInst_RT(inst)] = res;
2180
2181 MIPS_R2_STATS(llsc);
2182
2183 break;
2184 case pref_op:
2185 /* skip it */
2186 break;
2187 default:
2188 err = SIGILL;
2189 }
2190
2191 /*
2192 * Let's not return to userland just yet. It's costly and
2193 * it's likely we have more R2 instructions to emulate.
2194 */
2195 if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
2196 regs->cp0_cause &= ~CAUSEF_BD;
2197 err = get_user(inst, (u32 __user *)regs->cp0_epc);
2198 if (!err)
2199 goto repeat;
2200
2201 if (err < 0)
2202 err = SIGSEGV;
2203 }
2204
2205 if (err && (err != SIGEMT)) {
2206 regs->regs[31] = r31;
2207 regs->cp0_epc = epc;
2208 }
2209
2210 /* Likely a MIPS R6 compatible instruction */
2211 if (pass && (err == SIGILL))
2212 err = 0;
2213
2214 return err;
2215}
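The Config5/LLB requirement enforced in the ll/sc/lld/scd cases above exists to keep sequences like the following atomic; an illustrative R2 fragment in comment form (registers and the increment are hypothetical):

/*
 * Illustrative user-level R2 sequence (atomic increment):
 *
 *   1:	ll	t0, 0(a0)	# load-linked, sets LLAddr/LLB
 *	addiu	t0, t0, 1
 *	sc	t0, 0(a0)	# store-conditional, t0 = 1 on success
 *	beqz	t0, 1b		# retry if the link was broken
 *
 * On an R6 CPU the R2-encoded LL and SC both trap with RI and land in
 * this emulator. Returning with ERETNC (possible only when Config5/LLB
 * exists) keeps the LLAddr/LLB bit set between the two, so the emulated
 * SC can still succeed; a plain ERET would clear it on every exception
 * return and the loop could never make progress, hence the SIGKILL
 * fallback.
 */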
2216
2217#ifdef CONFIG_DEBUG_FS
2218
2219static int mipsr2_stats_show(struct seq_file *s, void *unused)
2220{
2222 seq_puts(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
2223 seq_printf(s, "movs\t\t%ld\t%ld\n",
2224 (unsigned long)__this_cpu_read(mipsr2emustats.movs),
2225 (unsigned long)__this_cpu_read(mipsr2bdemustats.movs));
2226 seq_printf(s, "hilo\t\t%ld\t%ld\n",
2227 (unsigned long)__this_cpu_read(mipsr2emustats.hilo),
2228 (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo));
2229 seq_printf(s, "muls\t\t%ld\t%ld\n",
2230 (unsigned long)__this_cpu_read(mipsr2emustats.muls),
2231 (unsigned long)__this_cpu_read(mipsr2bdemustats.muls));
2232 seq_printf(s, "divs\t\t%ld\t%ld\n",
2233 (unsigned long)__this_cpu_read(mipsr2emustats.divs),
2234 (unsigned long)__this_cpu_read(mipsr2bdemustats.divs));
2235 seq_printf(s, "dsps\t\t%ld\t%ld\n",
2236 (unsigned long)__this_cpu_read(mipsr2emustats.dsps),
2237 (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps));
2238 seq_printf(s, "bops\t\t%ld\t%ld\n",
2239 (unsigned long)__this_cpu_read(mipsr2emustats.bops),
2240 (unsigned long)__this_cpu_read(mipsr2bdemustats.bops));
2241 seq_printf(s, "traps\t\t%ld\t%ld\n",
2242 (unsigned long)__this_cpu_read(mipsr2emustats.traps),
2243 (unsigned long)__this_cpu_read(mipsr2bdemustats.traps));
2244 seq_printf(s, "fpus\t\t%ld\t%ld\n",
2245 (unsigned long)__this_cpu_read(mipsr2emustats.fpus),
2246 (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus));
2247 seq_printf(s, "loads\t\t%ld\t%ld\n",
2248 (unsigned long)__this_cpu_read(mipsr2emustats.loads),
2249 (unsigned long)__this_cpu_read(mipsr2bdemustats.loads));
2250 seq_printf(s, "stores\t\t%ld\t%ld\n",
2251 (unsigned long)__this_cpu_read(mipsr2emustats.stores),
2252 (unsigned long)__this_cpu_read(mipsr2bdemustats.stores));
2253 seq_printf(s, "llsc\t\t%ld\t%ld\n",
2254 (unsigned long)__this_cpu_read(mipsr2emustats.llsc),
2255 (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc));
2256 seq_printf(s, "dsemul\t\t%ld\t%ld\n",
2257 (unsigned long)__this_cpu_read(mipsr2emustats.dsemul),
2258 (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul));
2259 seq_printf(s, "jr\t\t%ld\n",
2260 (unsigned long)__this_cpu_read(mipsr2bremustats.jrs));
2261 seq_printf(s, "bltzl\t\t%ld\n",
2262 (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl));
2263 seq_printf(s, "bgezl\t\t%ld\n",
2264 (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl));
2265 seq_printf(s, "bltzll\t\t%ld\n",
2266 (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll));
2267 seq_printf(s, "bgezll\t\t%ld\n",
2268 (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll));
2269 seq_printf(s, "bltzal\t\t%ld\n",
2270 (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal));
2271 seq_printf(s, "bgezal\t\t%ld\n",
2272 (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal));
2273 seq_printf(s, "beql\t\t%ld\n",
2274 (unsigned long)__this_cpu_read(mipsr2bremustats.beql));
2275 seq_printf(s, "bnel\t\t%ld\n",
2276 (unsigned long)__this_cpu_read(mipsr2bremustats.bnel));
2277 seq_printf(s, "blezl\t\t%ld\n",
2278 (unsigned long)__this_cpu_read(mipsr2bremustats.blezl));
2279 seq_printf(s, "bgtzl\t\t%ld\n",
2280 (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl));
2281
2282 return 0;
2283}
2284
2285static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
2286{
2287 mipsr2_stats_show(s, unused);
2288
2289 __this_cpu_write((mipsr2emustats).movs, 0);
2290 __this_cpu_write((mipsr2bdemustats).movs, 0);
2291 __this_cpu_write((mipsr2emustats).hilo, 0);
2292 __this_cpu_write((mipsr2bdemustats).hilo, 0);
2293 __this_cpu_write((mipsr2emustats).muls, 0);
2294 __this_cpu_write((mipsr2bdemustats).muls, 0);
2295 __this_cpu_write((mipsr2emustats).divs, 0);
2296 __this_cpu_write((mipsr2bdemustats).divs, 0);
2297 __this_cpu_write((mipsr2emustats).dsps, 0);
2298 __this_cpu_write((mipsr2bdemustats).dsps, 0);
2299 __this_cpu_write((mipsr2emustats).bops, 0);
2300 __this_cpu_write((mipsr2bdemustats).bops, 0);
2301 __this_cpu_write((mipsr2emustats).traps, 0);
2302 __this_cpu_write((mipsr2bdemustats).traps, 0);
2303 __this_cpu_write((mipsr2emustats).fpus, 0);
2304 __this_cpu_write((mipsr2bdemustats).fpus, 0);
2305 __this_cpu_write((mipsr2emustats).loads, 0);
2306 __this_cpu_write((mipsr2bdemustats).loads, 0);
2307 __this_cpu_write((mipsr2emustats).stores, 0);
2308 __this_cpu_write((mipsr2bdemustats).stores, 0);
2309 __this_cpu_write((mipsr2emustats).llsc, 0);
2310 __this_cpu_write((mipsr2bdemustats).llsc, 0);
2311 __this_cpu_write((mipsr2emustats).dsemul, 0);
2312 __this_cpu_write((mipsr2bdemustats).dsemul, 0);
2313 __this_cpu_write((mipsr2bremustats).jrs, 0);
2314 __this_cpu_write((mipsr2bremustats).bltzl, 0);
2315 __this_cpu_write((mipsr2bremustats).bgezl, 0);
2316 __this_cpu_write((mipsr2bremustats).bltzll, 0);
2317 __this_cpu_write((mipsr2bremustats).bgezll, 0);
2318 __this_cpu_write((mipsr2bremustats).bltzal, 0);
2319 __this_cpu_write((mipsr2bremustats).bgezal, 0);
2320 __this_cpu_write((mipsr2bremustats).beql, 0);
2321 __this_cpu_write((mipsr2bremustats).bnel, 0);
2322 __this_cpu_write((mipsr2bremustats).blezl, 0);
2323 __this_cpu_write((mipsr2bremustats).bgtzl, 0);
2324
2325 return 0;
2326}
2327
2328static int mipsr2_stats_open(struct inode *inode, struct file *file)
2329{
2330 return single_open(file, mipsr2_stats_show, inode->i_private);
2331}
2332
2333static int mipsr2_stats_clear_open(struct inode *inode, struct file *file)
2334{
2335 return single_open(file, mipsr2_stats_clear_show, inode->i_private);
2336}
2337
2338static const struct file_operations mipsr2_emul_fops = {
2339 .open = mipsr2_stats_open,
2340 .read = seq_read,
2341 .llseek = seq_lseek,
2342 .release = single_release,
2343};
2344
2345static const struct file_operations mipsr2_clear_fops = {
2346 .open = mipsr2_stats_clear_open,
2347 .read = seq_read,
2348 .llseek = seq_lseek,
2349 .release = single_release,
2350};
2351
2352
2353static int __init mipsr2_init_debugfs(void)
2354{
2355 extern struct dentry *mips_debugfs_dir;
2356 struct dentry *mipsr2_emul;
2357
2358 if (!mips_debugfs_dir)
2359 return -ENODEV;
2360
2361 mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO,
2362 mips_debugfs_dir, NULL,
2363 &mipsr2_emul_fops);
2364 if (!mipsr2_emul)
2365 return -ENOMEM;
2366
2367 mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO,
2368 mips_debugfs_dir, NULL,
2369 &mipsr2_clear_fops);
2370 if (!mipsr2_emul)
2371 return -ENOMEM;
2372
2373 return 0;
2374}
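Assuming debugfs is mounted at the conventional /sys/kernel/debug and mips_debugfs_dir points at the arch's usual "mips" directory, the two read-only files created above land at:

/*
 *   /sys/kernel/debug/mips/r2_emul_stats        dump the counters
 *   /sys/kernel/debug/mips/r2_emul_stats_clear  dump, then zero them
 *
 * Reading the "clear" file prints the same table first (its show
 * routine calls mipsr2_stats_show()) and then resets the per-CPU
 * counters of the CPU servicing the read.
 */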
2375
2376device_initcall(mipsr2_init_debugfs);
2377
2378#endif /* CONFIG_DEBUG_FS */
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 17eaf0cf760c..291af0b5c482 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -14,6 +14,8 @@
 #include <linux/mm.h>
 #include <asm/uaccess.h>
 #include <asm/ftrace.h>
+#include <asm/fpu.h>
+#include <asm/msa.h>
 
 extern void *__bzero(void *__s, size_t __count);
 extern long __strncpy_from_kernel_nocheck_asm(char *__to,
@@ -32,6 +34,14 @@ extern long __strnlen_user_nocheck_asm(const char *s);
 extern long __strnlen_user_asm(const char *s);
 
 /*
+ * Core architecture code
+ */
+EXPORT_SYMBOL_GPL(_save_fp);
+#ifdef CONFIG_CPU_HAS_MSA
+EXPORT_SYMBOL_GPL(_save_msa);
+#endif
+
+/*
  * String functions
  */
 EXPORT_SYMBOL(memset);
@@ -67,11 +77,13 @@ EXPORT_SYMBOL(__strnlen_kernel_asm);
 EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
 EXPORT_SYMBOL(__strnlen_user_asm);
 
+#ifndef CONFIG_CPU_MIPSR6
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
 EXPORT_SYMBOL(__csum_partial_copy_kernel);
 EXPORT_SYMBOL(__csum_partial_copy_to_user);
 EXPORT_SYMBOL(__csum_partial_copy_from_user);
+#endif
 
 EXPORT_SYMBOL(invalid_pte_table);
 #ifdef CONFIG_FUNCTION_TRACER
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index f6547680c81c..423ae83af1fb 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -31,15 +31,11 @@
31 /* 31 /*
32 * check if we need to save FPU registers 32 * check if we need to save FPU registers
33 */ 33 */
34 PTR_L t3, TASK_THREAD_INFO(a0) 34 .set push
35 LONG_L t0, TI_FLAGS(t3) 35 .set noreorder
36 li t1, _TIF_USEDFPU 36 beqz a3, 1f
37 and t2, t0, t1 37 PTR_L t3, TASK_THREAD_INFO(a0)
38 beqz t2, 1f 38 .set pop
39 nor t1, zero, t1
40
41 and t0, t0, t1
42 LONG_S t0, TI_FLAGS(t3)
43 39
44 /* 40 /*
45 * clear saved user stack CU1 bit 41 * clear saved user stack CU1 bit
@@ -56,36 +52,9 @@
56 .set pop 52 .set pop
571: 531:
58 54
59 /* check if we need to save COP2 registers */
60 PTR_L t2, TASK_THREAD_INFO(a0)
61 LONG_L t0, ST_OFF(t2)
62 bbit0 t0, 30, 1f
63
64 /* Disable COP2 in the stored process state */
65 li t1, ST0_CU2
66 xor t0, t1
67 LONG_S t0, ST_OFF(t2)
68
69 /* Enable COP2 so we can save it */
70 mfc0 t0, CP0_STATUS
71 or t0, t1
72 mtc0 t0, CP0_STATUS
73
74 /* Save COP2 */
75 daddu a0, THREAD_CP2
76 jal octeon_cop2_save
77 dsubu a0, THREAD_CP2
78
79 /* Disable COP2 now that we are done */
80 mfc0 t0, CP0_STATUS
81 li t1, ST0_CU2
82 xor t0, t1
83 mtc0 t0, CP0_STATUS
84
851:
86#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 55#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
87 /* Check if we need to store CVMSEG state */ 56 /* Check if we need to store CVMSEG state */
88 mfc0 t0, $11,7 /* CvmMemCtl */ 57 dmfc0 t0, $11,7 /* CvmMemCtl */
89 bbit0 t0, 6, 3f /* Is user access enabled? */ 58 bbit0 t0, 6, 3f /* Is user access enabled? */
90 59
91 /* Store the CVMSEG state */ 60 /* Store the CVMSEG state */
@@ -109,9 +78,9 @@
109 .set reorder 78 .set reorder
110 79
111 /* Disable access to CVMSEG */ 80 /* Disable access to CVMSEG */
112 mfc0 t0, $11,7 /* CvmMemCtl */ 81 dmfc0 t0, $11,7 /* CvmMemCtl */
113 xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */ 82 xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */
114 mtc0 t0, $11,7 /* CvmMemCtl */ 83 dmtc0 t0, $11,7 /* CvmMemCtl */
115#endif 84#endif
1163: 853:
117 86
@@ -147,6 +116,8 @@
147 * void octeon_cop2_save(struct octeon_cop2_state *a0) 116 * void octeon_cop2_save(struct octeon_cop2_state *a0)
148 */ 117 */
149 .align 7 118 .align 7
119 .set push
120 .set noreorder
150 LEAF(octeon_cop2_save) 121 LEAF(octeon_cop2_save)
151 122
152 dmfc0 t9, $9,7 /* CvmCtl register. */ 123 dmfc0 t9, $9,7 /* CvmCtl register. */
@@ -157,17 +128,17 @@
157 dmfc2 t2, 0x0200 128 dmfc2 t2, 0x0200
158 sd t0, OCTEON_CP2_CRC_IV(a0) 129 sd t0, OCTEON_CP2_CRC_IV(a0)
159 sd t1, OCTEON_CP2_CRC_LENGTH(a0) 130 sd t1, OCTEON_CP2_CRC_LENGTH(a0)
160 sd t2, OCTEON_CP2_CRC_POLY(a0)
161 /* Skip next instructions if CvmCtl[NODFA_CP2] set */ 131 /* Skip next instructions if CvmCtl[NODFA_CP2] set */
162 bbit1 t9, 28, 1f 132 bbit1 t9, 28, 1f
133 sd t2, OCTEON_CP2_CRC_POLY(a0)
163 134
164 /* Save the LLM state */ 135 /* Save the LLM state */
165 dmfc2 t0, 0x0402 136 dmfc2 t0, 0x0402
166 dmfc2 t1, 0x040A 137 dmfc2 t1, 0x040A
167 sd t0, OCTEON_CP2_LLM_DAT(a0) 138 sd t0, OCTEON_CP2_LLM_DAT(a0)
168 sd t1, OCTEON_CP2_LLM_DAT+8(a0)
169 139
1701: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */ 1401: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */
141 sd t1, OCTEON_CP2_LLM_DAT+8(a0)
171 142
172 /* Save the COP2 crypto state */ 143 /* Save the COP2 crypto state */
173 /* this part is mostly common to both pass 1 and later revisions */ 144 /* this part is mostly common to both pass 1 and later revisions */
@@ -198,18 +169,20 @@
198 sd t2, OCTEON_CP2_AES_KEY+16(a0) 169 sd t2, OCTEON_CP2_AES_KEY+16(a0)
199 dmfc2 t2, 0x0101 170 dmfc2 t2, 0x0101
200 sd t3, OCTEON_CP2_AES_KEY+24(a0) 171 sd t3, OCTEON_CP2_AES_KEY+24(a0)
201 mfc0 t3, $15,0 /* Get the processor ID register */ 172 mfc0 v0, $15,0 /* Get the processor ID register */
202 sd t0, OCTEON_CP2_AES_KEYLEN(a0) 173 sd t0, OCTEON_CP2_AES_KEYLEN(a0)
203 li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ 174 li v1, 0x000d0000 /* This is the processor ID of Octeon Pass1 */
204 sd t1, OCTEON_CP2_AES_RESULT(a0) 175 sd t1, OCTEON_CP2_AES_RESULT(a0)
205 sd t2, OCTEON_CP2_AES_RESULT+8(a0)
206 /* Skip to the Pass1 version of the remainder of the COP2 state */ 176 /* Skip to the Pass1 version of the remainder of the COP2 state */
207 beq t3, t0, 2f 177 beq v0, v1, 2f
178 sd t2, OCTEON_CP2_AES_RESULT+8(a0)
208 179
209 /* the non-pass1 state when !CvmCtl[NOCRYPTO] */ 180 /* the non-pass1 state when !CvmCtl[NOCRYPTO] */
210 dmfc2 t1, 0x0240 181 dmfc2 t1, 0x0240
211 dmfc2 t2, 0x0241 182 dmfc2 t2, 0x0241
183 ori v1, v1, 0x9500 /* lowest OCTEON III PrId*/
212 dmfc2 t3, 0x0242 184 dmfc2 t3, 0x0242
185 subu v1, v0, v1 /* prid - lowest OCTEON III PrId */
213 dmfc2 t0, 0x0243 186 dmfc2 t0, 0x0243
214 sd t1, OCTEON_CP2_HSH_DATW(a0) 187 sd t1, OCTEON_CP2_HSH_DATW(a0)
215 dmfc2 t1, 0x0244 188 dmfc2 t1, 0x0244
@@ -262,8 +235,16 @@
262 sd t1, OCTEON_CP2_GFM_MULT+8(a0) 235 sd t1, OCTEON_CP2_GFM_MULT+8(a0)
263 sd t2, OCTEON_CP2_GFM_POLY(a0) 236 sd t2, OCTEON_CP2_GFM_POLY(a0)
264 sd t3, OCTEON_CP2_GFM_RESULT(a0) 237 sd t3, OCTEON_CP2_GFM_RESULT(a0)
265 sd t0, OCTEON_CP2_GFM_RESULT+8(a0) 238 bltz v1, 4f
239 sd t0, OCTEON_CP2_GFM_RESULT+8(a0)
240 /* OCTEON III things*/
241 dmfc2 t0, 0x024F
242 dmfc2 t1, 0x0050
243 sd t0, OCTEON_CP2_SHA3(a0)
244 sd t1, OCTEON_CP2_SHA3+8(a0)
2454:
266 jr ra 246 jr ra
247 nop
267 248
2682: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */ 2492: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */
269 dmfc2 t3, 0x0040 250 dmfc2 t3, 0x0040
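
A note on the PrId range test used in the two hunks above: the li/ori pair assembles 0x000d9500, the lowest OCTEON III processor ID, and the subu/bltz sequence branches past the SHA3 save when the running CPU's PrId is below it. A minimal C sketch of the same test, with an illustrative name (is_octeon3 is not a kernel symbol):

    /* Sketch only: models the 'subu v1, v0, v1; bltz v1, 4f' pair. */
    #define LOWEST_OCTEON3_PRID 0x000d9500

    static int is_octeon3(unsigned int prid)
    {
            /* bltz skips <=> (int)(prid - LOWEST_OCTEON3_PRID) < 0 */
            return (int)(prid - LOWEST_OCTEON3_PRID) >= 0;
    }
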
@@ -289,7 +270,9 @@
289 270
2903: /* pass 1 or CvmCtl[NOCRYPTO] set */ 2713: /* pass 1 or CvmCtl[NOCRYPTO] set */
291 jr ra 272 jr ra
273 nop
292 END(octeon_cop2_save) 274 END(octeon_cop2_save)
275 .set pop
293 276
294/* 277/*
295 * void octeon_cop2_restore(struct octeon_cop2_state *a0) 278 * void octeon_cop2_restore(struct octeon_cop2_state *a0)
@@ -354,9 +337,9 @@
354 ld t2, OCTEON_CP2_AES_RESULT+8(a0) 337 ld t2, OCTEON_CP2_AES_RESULT+8(a0)
355 mfc0 t3, $15,0 /* Get the processor ID register */ 338 mfc0 t3, $15,0 /* Get the processor ID register */
356 dmtc2 t0, 0x0110 339 dmtc2 t0, 0x0110
357 li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ 340 li v0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */
358 dmtc2 t1, 0x0100 341 dmtc2 t1, 0x0100
359 bne t0, t3, 3f /* Skip the next stuff for non-pass1 */ 342 bne v0, t3, 3f /* Skip the next stuff for non-pass1 */
360 dmtc2 t2, 0x0101 343 dmtc2 t2, 0x0101
361 344
362 /* this code is specific for pass 1 */ 345 /* this code is specific for pass 1 */
@@ -384,6 +367,7 @@
384 367
3853: /* this is post-pass1 code */ 3683: /* this is post-pass1 code */
386 ld t2, OCTEON_CP2_HSH_DATW(a0) 369 ld t2, OCTEON_CP2_HSH_DATW(a0)
370 ori v0, v0, 0x9500 /* lowest OCTEON III PrId*/
387 ld t0, OCTEON_CP2_HSH_DATW+8(a0) 371 ld t0, OCTEON_CP2_HSH_DATW+8(a0)
388 ld t1, OCTEON_CP2_HSH_DATW+16(a0) 372 ld t1, OCTEON_CP2_HSH_DATW+16(a0)
389 dmtc2 t2, 0x0240 373 dmtc2 t2, 0x0240
@@ -437,9 +421,15 @@
437 dmtc2 t2, 0x0259 421 dmtc2 t2, 0x0259
438 ld t2, OCTEON_CP2_GFM_RESULT+8(a0) 422 ld t2, OCTEON_CP2_GFM_RESULT+8(a0)
439 dmtc2 t0, 0x025E 423 dmtc2 t0, 0x025E
424 subu v0, t3, v0 /* prid - lowest OCTEON III PrId */
440 dmtc2 t1, 0x025A 425 dmtc2 t1, 0x025A
441 dmtc2 t2, 0x025B 426 bltz v0, done_restore
442 427 dmtc2 t2, 0x025B
428 /* OCTEON III things*/
429 ld t0, OCTEON_CP2_SHA3(a0)
430 ld t1, OCTEON_CP2_SHA3+8(a0)
431 dmtc2 t0, 0x0051
432 dmtc2 t1, 0x0050
443done_restore: 433done_restore:
444 jr ra 434 jr ra
445 nop 435 nop
@@ -450,18 +440,23 @@ done_restore:
450 * void octeon_mult_save() 440 * void octeon_mult_save()
451 * sp is assumed to point to a struct pt_regs 441 * sp is assumed to point to a struct pt_regs
452 * 442 *
453 * NOTE: This is called in SAVE_SOME in stackframe.h. It can only 443 * NOTE: This is called in SAVE_TEMP in stackframe.h. It can
454 * safely modify k0 and k1. 444 * safely modify v1, k0, k1, $10-$15, and $24. It will
 445 * be overwritten with a processor-specific version of the code.
455 */ 446 */
456 .align 7 447 .p2align 7
457 .set push 448 .set push
458 .set noreorder 449 .set noreorder
459 LEAF(octeon_mult_save) 450 LEAF(octeon_mult_save)
460 dmfc0 k0, $9,7 /* CvmCtl register. */ 451 jr ra
461 bbit1 k0, 27, 1f /* Skip CvmCtl[NOMUL] */
462 nop 452 nop
453 .space 30 * 4, 0
454octeon_mult_save_end:
455 EXPORT(octeon_mult_save_end)
456 END(octeon_mult_save)
463 457
464 /* Save the multiplier state */ 458 LEAF(octeon_mult_save2)
459 /* Save the multiplier state OCTEON II and earlier*/
465 v3mulu k0, $0, $0 460 v3mulu k0, $0, $0
466 v3mulu k1, $0, $0 461 v3mulu k1, $0, $0
467 sd k0, PT_MTP(sp) /* PT_MTP has P0 */ 462 sd k0, PT_MTP(sp) /* PT_MTP has P0 */
@@ -476,44 +471,107 @@ done_restore:
476 sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */ 471 sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */
477 jr ra 472 jr ra
478 sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */ 473 sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */
479 474octeon_mult_save2_end:
4801: /* Resume here if CvmCtl[NOMUL] */ 475 EXPORT(octeon_mult_save2_end)
476 END(octeon_mult_save2)
477
478 LEAF(octeon_mult_save3)
479 /* Save the multiplier state OCTEON III */
480 v3mulu $10, $0, $0 /* read P0 */
481 v3mulu $11, $0, $0 /* read P1 */
482 v3mulu $12, $0, $0 /* read P2 */
483 sd $10, PT_MTP+(0*8)(sp) /* store P0 */
484 v3mulu $10, $0, $0 /* read P3 */
485 sd $11, PT_MTP+(1*8)(sp) /* store P1 */
486 v3mulu $11, $0, $0 /* read P4 */
487 sd $12, PT_MTP+(2*8)(sp) /* store P2 */
488 ori $13, $0, 1
489 v3mulu $12, $0, $0 /* read P5 */
490 sd $10, PT_MTP+(3*8)(sp) /* store P3 */
491 v3mulu $13, $13, $0 /* P4-P0 = MPL5-MPL1, $13 = MPL0 */
492 sd $11, PT_MTP+(4*8)(sp) /* store P4 */
493 v3mulu $10, $0, $0 /* read MPL1 */
494 sd $12, PT_MTP+(5*8)(sp) /* store P5 */
495 v3mulu $11, $0, $0 /* read MPL2 */
496 sd $13, PT_MPL+(0*8)(sp) /* store MPL0 */
497 v3mulu $12, $0, $0 /* read MPL3 */
498 sd $10, PT_MPL+(1*8)(sp) /* store MPL1 */
499 v3mulu $10, $0, $0 /* read MPL4 */
500 sd $11, PT_MPL+(2*8)(sp) /* store MPL2 */
501 v3mulu $11, $0, $0 /* read MPL5 */
502 sd $12, PT_MPL+(3*8)(sp) /* store MPL3 */
503 sd $10, PT_MPL+(4*8)(sp) /* store MPL4 */
481 jr ra 504 jr ra
482 END(octeon_mult_save) 505 sd $11, PT_MPL+(5*8)(sp) /* store MPL5 */
506octeon_mult_save3_end:
507 EXPORT(octeon_mult_save3_end)
508 END(octeon_mult_save3)
483 .set pop 509 .set pop
484 510
485/* 511/*
486 * void octeon_mult_restore() 512 * void octeon_mult_restore()
487 * sp is assumed to point to a struct pt_regs 513 * sp is assumed to point to a struct pt_regs
488 * 514 *
489 * NOTE: This is called in RESTORE_SOME in stackframe.h. 515 * NOTE: This is called in RESTORE_TEMP in stackframe.h.
490 */ 516 */
491 .align 7 517 .p2align 7
492 .set push 518 .set push
493 .set noreorder 519 .set noreorder
494 LEAF(octeon_mult_restore) 520 LEAF(octeon_mult_restore)
495 dmfc0 k1, $9,7 /* CvmCtl register. */ 521 jr ra
496 ld v0, PT_MPL(sp) /* MPL0 */ 522 nop
497 ld v1, PT_MPL+8(sp) /* MPL1 */ 523 .space 30 * 4, 0
498 ld k0, PT_MPL+16(sp) /* MPL2 */ 524octeon_mult_restore_end:
499 bbit1 k1, 27, 1f /* Skip CvmCtl[NOMUL] */ 525 EXPORT(octeon_mult_restore_end)
500 /* Normally falls through, so no time wasted here */ 526 END(octeon_mult_restore)
501 nop
502 527
528 LEAF(octeon_mult_restore2)
529 ld v0, PT_MPL(sp) /* MPL0 */
530 ld v1, PT_MPL+8(sp) /* MPL1 */
531 ld k0, PT_MPL+16(sp) /* MPL2 */
503 /* Restore the multiplier state */ 532 /* Restore the multiplier state */
504 ld k1, PT_MTP+16(sp) /* P2 */ 533 ld k1, PT_MTP+16(sp) /* P2 */
505 MTM0 v0 /* MPL0 */ 534 mtm0 v0 /* MPL0 */
506 ld v0, PT_MTP+8(sp) /* P1 */ 535 ld v0, PT_MTP+8(sp) /* P1 */
507 MTM1 v1 /* MPL1 */ 536 mtm1 v1 /* MPL1 */
508 ld v1, PT_MTP(sp) /* P0 */ 537 ld v1, PT_MTP(sp) /* P0 */
509 MTM2 k0 /* MPL2 */ 538 mtm2 k0 /* MPL2 */
510 MTP2 k1 /* P2 */ 539 mtp2 k1 /* P2 */
511 MTP1 v0 /* P1 */ 540 mtp1 v0 /* P1 */
512 jr ra 541 jr ra
513 MTP0 v1 /* P0 */ 542 mtp0 v1 /* P0 */
514 543octeon_mult_restore2_end:
5151: /* Resume here if CvmCtl[NOMUL] */ 544 EXPORT(octeon_mult_restore2_end)
545 END(octeon_mult_restore2)
546
547 LEAF(octeon_mult_restore3)
548 ld $12, PT_MPL+(0*8)(sp) /* read MPL0 */
549 ld $13, PT_MPL+(3*8)(sp) /* read MPL3 */
550 ld $10, PT_MPL+(1*8)(sp) /* read MPL1 */
551 ld $11, PT_MPL+(4*8)(sp) /* read MPL4 */
552 .word 0x718d0008
553 /* mtm0 $12, $13 restore MPL0 and MPL3 */
554 ld $12, PT_MPL+(2*8)(sp) /* read MPL2 */
555 .word 0x714b000c
556 /* mtm1 $10, $11 restore MPL1 and MPL4 */
557 ld $13, PT_MPL+(5*8)(sp) /* read MPL5 */
558 ld $10, PT_MTP+(0*8)(sp) /* read P0 */
559 ld $11, PT_MTP+(3*8)(sp) /* read P3 */
560 .word 0x718d000d
561 /* mtm2 $12, $13 restore MPL2 and MPL5 */
562 ld $12, PT_MTP+(1*8)(sp) /* read P1 */
563 .word 0x714b0009
564 /* mtp0 $10, $11 restore P0 and P3 */
565 ld $13, PT_MTP+(4*8)(sp) /* read P4 */
566 ld $10, PT_MTP+(2*8)(sp) /* read P2 */
567 ld $11, PT_MTP+(5*8)(sp) /* read P5 */
568 .word 0x718d000a
569 /* mtp1 $12, $13 restore P1 and P4 */
516 jr ra 570 jr ra
517 nop 571 .word 0x714b000b
518 END(octeon_mult_restore) 572 /* mtp2 $10, $11 restore P2 and P5 */
573
574octeon_mult_restore3_end:
575 EXPORT(octeon_mult_restore3_end)
576 END(octeon_mult_restore3)
519 .set pop 577 .set pop
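
The jr/nop stub padded with .space and the exported *_end labels exist so boot code can patch in a processor-specific body: octeon_mult_save2 for OCTEON II and earlier, octeon_mult_save3 for OCTEON III. A hedged C sketch of that patching, assuming the exported labels are addressable as byte arrays (the actual copy and cache maintenance live elsewhere in the Octeon setup code, not in this hunk):

    #include <string.h>

    extern char octeon_mult_save[];
    extern char octeon_mult_save3[], octeon_mult_save3_end[];

    static void patch_octeon3_mult_save(void)
    {
            /* overwrite the placeholder 'jr ra; nop; .space' body */
            memcpy(octeon_mult_save, octeon_mult_save3,
                   octeon_mult_save3_end - octeon_mult_save3);
            /* an I-cache flush of the patched range must follow */
    }
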
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 097fc8d14e42..130af7d26a9c 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -82,7 +82,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
82 seq_printf(m, "]\n"); 82 seq_printf(m, "]\n");
83 } 83 }
84 84
85 seq_printf(m, "isa\t\t\t: mips1"); 85 seq_printf(m, "isa\t\t\t:");
86 if (cpu_has_mips_r1)
87 seq_printf(m, " mips1");
86 if (cpu_has_mips_2) 88 if (cpu_has_mips_2)
87 seq_printf(m, "%s", " mips2"); 89 seq_printf(m, "%s", " mips2");
88 if (cpu_has_mips_3) 90 if (cpu_has_mips_3)
@@ -95,10 +97,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
95 seq_printf(m, "%s", " mips32r1"); 97 seq_printf(m, "%s", " mips32r1");
96 if (cpu_has_mips32r2) 98 if (cpu_has_mips32r2)
97 seq_printf(m, "%s", " mips32r2"); 99 seq_printf(m, "%s", " mips32r2");
100 if (cpu_has_mips32r6)
101 seq_printf(m, "%s", " mips32r6");
98 if (cpu_has_mips64r1) 102 if (cpu_has_mips64r1)
99 seq_printf(m, "%s", " mips64r1"); 103 seq_printf(m, "%s", " mips64r1");
100 if (cpu_has_mips64r2) 104 if (cpu_has_mips64r2)
101 seq_printf(m, "%s", " mips64r2"); 105 seq_printf(m, "%s", " mips64r2");
106 if (cpu_has_mips64r6)
107 seq_printf(m, "%s", " mips64r6");
102 seq_printf(m, "\n"); 108 seq_printf(m, "\n");
103 109
104 seq_printf(m, "ASEs implemented\t:"); 110 seq_printf(m, "ASEs implemented\t:");
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 85bff5d513e5..bf85cc180d91 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -25,6 +25,7 @@
25#include <linux/completion.h> 25#include <linux/completion.h>
26#include <linux/kallsyms.h> 26#include <linux/kallsyms.h>
27#include <linux/random.h> 27#include <linux/random.h>
28#include <linux/prctl.h>
28 29
29#include <asm/asm.h> 30#include <asm/asm.h>
30#include <asm/bootinfo.h> 31#include <asm/bootinfo.h>
@@ -562,3 +563,98 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
562{ 563{
563 smp_call_function(arch_dump_stack, NULL, 1); 564 smp_call_function(arch_dump_stack, NULL, 1);
564} 565}
566
567int mips_get_process_fp_mode(struct task_struct *task)
568{
569 int value = 0;
570
571 if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
572 value |= PR_FP_MODE_FR;
573 if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
574 value |= PR_FP_MODE_FRE;
575
576 return value;
577}
578
579int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
580{
581 const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
582 unsigned long switch_count;
583 struct task_struct *t;
584
585 /* Check the value is valid */
586 if (value & ~known_bits)
587 return -EOPNOTSUPP;
588
589 /* Avoid inadvertently triggering emulation */
590 if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
591 !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
592 return -EOPNOTSUPP;
593 if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
594 return -EOPNOTSUPP;
595
596 /* FR = 0 not supported in MIPS R6 */
597 if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
598 return -EOPNOTSUPP;
599
600 /* Save FP & vector context, then disable FPU & MSA */
601 if (task->signal == current->signal)
602 lose_fpu(1);
603
604 /* Prevent any threads from obtaining live FP context */
605 atomic_set(&task->mm->context.fp_mode_switching, 1);
606 smp_mb__after_atomic();
607
608 /*
609 * If there are multiple online CPUs then wait until all threads whose
610 * FP mode is about to change have been context switched. This approach
611 * allows us to only worry about whether an FP mode switch is in
 612 * progress when FP is first used in a task's time slice. Pretty much all
 613 * of the mode switch overhead can thus be confined to cases where mode
 614 * switches are actually occurring. That is, to here. However, for the
615 * thread performing the mode switch it may take a while...
616 */
617 if (num_online_cpus() > 1) {
618 spin_lock_irq(&task->sighand->siglock);
619
620 for_each_thread(task, t) {
621 if (t == current)
622 continue;
623
624 switch_count = t->nvcsw + t->nivcsw;
625
626 do {
627 spin_unlock_irq(&task->sighand->siglock);
628 cond_resched();
629 spin_lock_irq(&task->sighand->siglock);
630 } while ((t->nvcsw + t->nivcsw) == switch_count);
631 }
632
633 spin_unlock_irq(&task->sighand->siglock);
634 }
635
636 /*
637 * There are now no threads of the process with live FP context, so it
638 * is safe to proceed with the FP mode switch.
639 */
640 for_each_thread(task, t) {
641 /* Update desired FP register width */
642 if (value & PR_FP_MODE_FR) {
643 clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
644 } else {
645 set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
646 clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
647 }
648
649 /* Update desired FP single layout */
650 if (value & PR_FP_MODE_FRE)
651 set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
652 else
653 clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
654 }
655
656 /* Allow threads to use FP again */
657 atomic_set(&task->mm->context.fp_mode_switching, 0);
658
659 return 0;
660}
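
These two hooks back the PR_GET_FP_MODE/PR_SET_FP_MODE prctl options. A small userland sketch of exercising them (the fallback #defines are only for headers that predate the feature; the values match include/uapi/linux/prctl.h):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_FP_MODE
    # define PR_SET_FP_MODE 45
    # define PR_GET_FP_MODE 46
    # define PR_FP_MODE_FR  (1 << 0)
    # define PR_FP_MODE_FRE (1 << 1)
    #endif

    int main(void)
    {
            int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

            if (mode < 0) {
                    perror("PR_GET_FP_MODE");
                    return 1;
            }
            printf("FR=%d FRE=%d\n",
                   !!(mode & PR_FP_MODE_FR), !!(mode & PR_FP_MODE_FRE));

            /* request 64-bit FP registers; fails with EOPNOTSUPP on
             * CPUs that cannot honour the requested mode */
            if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR, 0, 0, 0) < 0)
                    perror("PR_SET_FP_MODE");
            return 0;
    }
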
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 6c160c67984c..676c5030a953 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -34,7 +34,7 @@
34 .endm 34 .endm
35 35
36 .set noreorder 36 .set noreorder
37 .set arch=r4000 37 .set MIPS_ISA_ARCH_LEVEL_RAW
38 38
39LEAF(_save_fp_context) 39LEAF(_save_fp_context)
40 .set push 40 .set push
@@ -42,7 +42,8 @@ LEAF(_save_fp_context)
42 cfc1 t1, fcr31 42 cfc1 t1, fcr31
43 .set pop 43 .set pop
44 44
45#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 45#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
46 defined(CONFIG_CPU_MIPS32_R6)
46 .set push 47 .set push
47 SET_HARDFLOAT 48 SET_HARDFLOAT
48#ifdef CONFIG_CPU_MIPS32_R2 49#ifdef CONFIG_CPU_MIPS32_R2
@@ -105,10 +106,12 @@ LEAF(_save_fp_context32)
105 SET_HARDFLOAT 106 SET_HARDFLOAT
106 cfc1 t1, fcr31 107 cfc1 t1, fcr31
107 108
109#ifndef CONFIG_CPU_MIPS64_R6
108 mfc0 t0, CP0_STATUS 110 mfc0 t0, CP0_STATUS
109 sll t0, t0, 5 111 sll t0, t0, 5
110 bgez t0, 1f # skip storing odd if FR=0 112 bgez t0, 1f # skip storing odd if FR=0
111 nop 113 nop
114#endif
112 115
113 /* Store the 16 odd double precision registers */ 116 /* Store the 16 odd double precision registers */
114 EX sdc1 $f1, SC32_FPREGS+8(a0) 117 EX sdc1 $f1, SC32_FPREGS+8(a0)
@@ -163,7 +166,8 @@ LEAF(_save_fp_context32)
163LEAF(_restore_fp_context) 166LEAF(_restore_fp_context)
164 EX lw t1, SC_FPC_CSR(a0) 167 EX lw t1, SC_FPC_CSR(a0)
165 168
166#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 169#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
170 defined(CONFIG_CPU_MIPS32_R6)
167 .set push 171 .set push
168 SET_HARDFLOAT 172 SET_HARDFLOAT
169#ifdef CONFIG_CPU_MIPS32_R2 173#ifdef CONFIG_CPU_MIPS32_R2
@@ -223,10 +227,12 @@ LEAF(_restore_fp_context32)
223 SET_HARDFLOAT 227 SET_HARDFLOAT
224 EX lw t1, SC32_FPC_CSR(a0) 228 EX lw t1, SC32_FPC_CSR(a0)
225 229
230#ifndef CONFIG_CPU_MIPS64_R6
226 mfc0 t0, CP0_STATUS 231 mfc0 t0, CP0_STATUS
227 sll t0, t0, 5 232 sll t0, t0, 5
228 bgez t0, 1f # skip loading odd if FR=0 233 bgez t0, 1f # skip loading odd if FR=0
229 nop 234 nop
235#endif
230 236
231 EX ldc1 $f1, SC32_FPREGS+8(a0) 237 EX ldc1 $f1, SC32_FPREGS+8(a0)
232 EX ldc1 $f3, SC32_FPREGS+24(a0) 238 EX ldc1 $f3, SC32_FPREGS+24(a0)
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 64591e671878..3b1a36f13a7d 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -115,7 +115,8 @@
115 * Save a thread's fp context. 115 * Save a thread's fp context.
116 */ 116 */
117LEAF(_save_fp) 117LEAF(_save_fp)
118#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 118#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
119 defined(CONFIG_CPU_MIPS32_R6)
119 mfc0 t0, CP0_STATUS 120 mfc0 t0, CP0_STATUS
120#endif 121#endif
121 fpu_save_double a0 t0 t1 # clobbers t1 122 fpu_save_double a0 t0 t1 # clobbers t1
@@ -126,7 +127,8 @@ LEAF(_save_fp)
126 * Restore a thread's fp context. 127 * Restore a thread's fp context.
127 */ 128 */
128LEAF(_restore_fp) 129LEAF(_restore_fp)
129#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 130#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
131 defined(CONFIG_CPU_MIPS32_R6)
130 mfc0 t0, CP0_STATUS 132 mfc0 t0, CP0_STATUS
131#endif 133#endif
132 fpu_restore_double a0 t0 t1 # clobbers t1 134 fpu_restore_double a0 t0 t1 # clobbers t1
@@ -240,9 +242,9 @@ LEAF(_init_fpu)
240 mtc1 t1, $f30 242 mtc1 t1, $f30
241 mtc1 t1, $f31 243 mtc1 t1, $f31
242 244
243#ifdef CONFIG_CPU_MIPS32_R2 245#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6)
244 .set push 246 .set push
245 .set mips32r2 247 .set MIPS_ISA_LEVEL_RAW
246 .set fp=64 248 .set fp=64
247 sll t0, t0, 5 # is Status.FR set? 249 sll t0, t0, 5 # is Status.FR set?
248 bgez t0, 1f # no: skip setting upper 32b 250 bgez t0, 1f # no: skip setting upper 32b
@@ -280,9 +282,9 @@ LEAF(_init_fpu)
280 mthc1 t1, $f30 282 mthc1 t1, $f30
281 mthc1 t1, $f31 283 mthc1 t1, $f31
2821: .set pop 2841: .set pop
283#endif /* CONFIG_CPU_MIPS32_R2 */ 285#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */
284#else 286#else
285 .set arch=r4000 287 .set MIPS_ISA_ARCH_LEVEL_RAW
286 dmtc1 t1, $f0 288 dmtc1 t1, $f0
287 dmtc1 t1, $f2 289 dmtc1 t1, $f2
288 dmtc1 t1, $f4 290 dmtc1 t1, $f4
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index 67f2495def1c..d1168d7c31e8 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -208,6 +208,7 @@ void spram_config(void)
208 case CPU_INTERAPTIV: 208 case CPU_INTERAPTIV:
209 case CPU_PROAPTIV: 209 case CPU_PROAPTIV:
210 case CPU_P5600: 210 case CPU_P5600:
211 case CPU_QEMU_GENERIC:
211 config0 = read_c0_config(); 212 config0 = read_c0_config();
212 /* FIXME: addresses are Malta specific */ 213 /* FIXME: addresses are Malta specific */
213 if (config0 & (1<<24)) { 214 if (config0 & (1<<24)) {
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 604b558809c4..53a7ef9a8f32 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -136,7 +136,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
136 : "memory"); 136 : "memory");
137 } else if (cpu_has_llsc) { 137 } else if (cpu_has_llsc) {
138 __asm__ __volatile__ ( 138 __asm__ __volatile__ (
139 " .set arch=r4000 \n" 139 " .set "MIPS_ISA_ARCH_LEVEL" \n"
140 " li %[err], 0 \n" 140 " li %[err], 0 \n"
141 "1: ll %[old], (%[addr]) \n" 141 "1: ll %[old], (%[addr]) \n"
142 " move %[tmp], %[new] \n" 142 " move %[tmp], %[new] \n"
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index c3b41e24c05a..33984c04b60b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -46,6 +46,7 @@
46#include <asm/fpu.h> 46#include <asm/fpu.h>
47#include <asm/fpu_emulator.h> 47#include <asm/fpu_emulator.h>
48#include <asm/idle.h> 48#include <asm/idle.h>
49#include <asm/mips-r2-to-r6-emul.h>
49#include <asm/mipsregs.h> 50#include <asm/mipsregs.h>
50#include <asm/mipsmtregs.h> 51#include <asm/mipsmtregs.h>
51#include <asm/module.h> 52#include <asm/module.h>
@@ -837,7 +838,7 @@ out:
837 exception_exit(prev_state); 838 exception_exit(prev_state);
838} 839}
839 840
840static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, 841void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
841 const char *str) 842 const char *str)
842{ 843{
843 siginfo_t info; 844 siginfo_t info;
@@ -1027,7 +1028,34 @@ asmlinkage void do_ri(struct pt_regs *regs)
1027 unsigned int opcode = 0; 1028 unsigned int opcode = 0;
1028 int status = -1; 1029 int status = -1;
1029 1030
1031 /*
1032 * Avoid any kernel code. Just emulate the R2 instruction
1033 * as quickly as possible.
1034 */
1035 if (mipsr2_emulation && cpu_has_mips_r6 &&
1036 likely(user_mode(regs))) {
1037 if (likely(get_user(opcode, epc) >= 0)) {
1038 status = mipsr2_decoder(regs, opcode);
1039 switch (status) {
1040 case 0:
1041 case SIGEMT:
1042 task_thread_info(current)->r2_emul_return = 1;
1043 return;
1044 case SIGILL:
1045 goto no_r2_instr;
1046 default:
1047 process_fpemu_return(status,
1048 &current->thread.cp0_baduaddr);
1049 task_thread_info(current)->r2_emul_return = 1;
1050 return;
1051 }
1052 }
1053 }
1054
1055no_r2_instr:
1056
1030 prev_state = exception_enter(); 1057 prev_state = exception_enter();
1058
1031 if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), 1059 if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
1032 SIGILL) == NOTIFY_STOP) 1060 SIGILL) == NOTIFY_STOP)
1033 goto out; 1061 goto out;
@@ -1134,10 +1162,29 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
1134 return NOTIFY_OK; 1162 return NOTIFY_OK;
1135} 1163}
1136 1164
1165static int wait_on_fp_mode_switch(atomic_t *p)
1166{
1167 /*
1168 * The FP mode for this task is currently being switched. That may
 1169 * involve modifications to the format of this task's FP context which
1170 * make it unsafe to proceed with execution for the moment. Instead,
1171 * schedule some other task.
1172 */
1173 schedule();
1174 return 0;
1175}
1176
1137static int enable_restore_fp_context(int msa) 1177static int enable_restore_fp_context(int msa)
1138{ 1178{
1139 int err, was_fpu_owner, prior_msa; 1179 int err, was_fpu_owner, prior_msa;
1140 1180
1181 /*
1182 * If an FP mode switch is currently underway, wait for it to
1183 * complete before proceeding.
1184 */
1185 wait_on_atomic_t(&current->mm->context.fp_mode_switching,
1186 wait_on_fp_mode_switch, TASK_KILLABLE);
1187
1141 if (!used_math()) { 1188 if (!used_math()) {
1142 /* First time FP context user. */ 1189 /* First time FP context user. */
1143 preempt_disable(); 1190 preempt_disable();
@@ -1541,6 +1588,7 @@ static inline void parity_protection_init(void)
1541 case CPU_INTERAPTIV: 1588 case CPU_INTERAPTIV:
1542 case CPU_PROAPTIV: 1589 case CPU_PROAPTIV:
1543 case CPU_P5600: 1590 case CPU_P5600:
1591 case CPU_QEMU_GENERIC:
1544 { 1592 {
1545#define ERRCTL_PE 0x80000000 1593#define ERRCTL_PE 0x80000000
1546#define ERRCTL_L2P 0x00800000 1594#define ERRCTL_L2P 0x00800000
@@ -1630,7 +1678,7 @@ asmlinkage void cache_parity_error(void)
1630 printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n", 1678 printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1631 reg_val & (1<<30) ? "secondary" : "primary", 1679 reg_val & (1<<30) ? "secondary" : "primary",
1632 reg_val & (1<<31) ? "data" : "insn"); 1680 reg_val & (1<<31) ? "data" : "insn");
1633 if (cpu_has_mips_r2 && 1681 if ((cpu_has_mips_r2_r6) &&
1634 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { 1682 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1635 pr_err("Error bits: %s%s%s%s%s%s%s%s\n", 1683 pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
1636 reg_val & (1<<29) ? "ED " : "", 1684 reg_val & (1<<29) ? "ED " : "",
@@ -1670,7 +1718,7 @@ asmlinkage void do_ftlb(void)
1670 unsigned int reg_val; 1718 unsigned int reg_val;
1671 1719
1672 /* For the moment, report the problem and hang. */ 1720 /* For the moment, report the problem and hang. */
1673 if (cpu_has_mips_r2 && 1721 if ((cpu_has_mips_r2_r6) &&
1674 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { 1722 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1675 pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", 1723 pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
1676 read_c0_ecc()); 1724 read_c0_ecc());
@@ -1959,7 +2007,7 @@ static void configure_hwrena(void)
1959{ 2007{
1960 unsigned int hwrena = cpu_hwrena_impl_bits; 2008 unsigned int hwrena = cpu_hwrena_impl_bits;
1961 2009
1962 if (cpu_has_mips_r2) 2010 if (cpu_has_mips_r2_r6)
1963 hwrena |= 0x0000000f; 2011 hwrena |= 0x0000000f;
1964 2012
1965 if (!noulri && cpu_has_userlocal) 2013 if (!noulri && cpu_has_userlocal)
@@ -2003,7 +2051,7 @@ void per_cpu_trap_init(bool is_boot_cpu)
2003 * o read IntCtl.IPTI to determine the timer interrupt 2051 * o read IntCtl.IPTI to determine the timer interrupt
2004 * o read IntCtl.IPPCI to determine the performance counter interrupt 2052 * o read IntCtl.IPPCI to determine the performance counter interrupt
2005 */ 2053 */
2006 if (cpu_has_mips_r2) { 2054 if (cpu_has_mips_r2_r6) {
2007 cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; 2055 cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
2008 cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; 2056 cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
2009 cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; 2057 cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
@@ -2094,7 +2142,7 @@ void __init trap_init(void)
2094#else 2142#else
2095 ebase = CKSEG0; 2143 ebase = CKSEG0;
2096#endif 2144#endif
2097 if (cpu_has_mips_r2) 2145 if (cpu_has_mips_r2_r6)
2098 ebase += (read_c0_ebase() & 0x3ffff000); 2146 ebase += (read_c0_ebase() & 0x3ffff000);
2099 } 2147 }
2100 2148
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index e11906dff885..bbb69695a0a1 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -129,6 +129,7 @@ extern void show_registers(struct pt_regs *regs);
129 : "=&r" (value), "=r" (res) \ 129 : "=&r" (value), "=r" (res) \
130 : "r" (addr), "i" (-EFAULT)); 130 : "r" (addr), "i" (-EFAULT));
131 131
132#ifndef CONFIG_CPU_MIPSR6
132#define LoadW(addr, value, res) \ 133#define LoadW(addr, value, res) \
133 __asm__ __volatile__ ( \ 134 __asm__ __volatile__ ( \
134 "1:\t"user_lwl("%0", "(%2)")"\n" \ 135 "1:\t"user_lwl("%0", "(%2)")"\n" \
@@ -146,6 +147,39 @@ extern void show_registers(struct pt_regs *regs);
146 ".previous" \ 147 ".previous" \
147 : "=&r" (value), "=r" (res) \ 148 : "=&r" (value), "=r" (res) \
148 : "r" (addr), "i" (-EFAULT)); 149 : "r" (addr), "i" (-EFAULT));
150#else
151/* MIPSR6 has no lwl instruction */
152#define LoadW(addr, value, res) \
153 __asm__ __volatile__ ( \
154 ".set\tpush\n" \
155 ".set\tnoat\n\t" \
156 "1:"user_lb("%0", "0(%2)")"\n\t" \
157 "2:"user_lbu("$1", "1(%2)")"\n\t" \
158 "sll\t%0, 0x8\n\t" \
159 "or\t%0, $1\n\t" \
160 "3:"user_lbu("$1", "2(%2)")"\n\t" \
161 "sll\t%0, 0x8\n\t" \
162 "or\t%0, $1\n\t" \
163 "4:"user_lbu("$1", "3(%2)")"\n\t" \
164 "sll\t%0, 0x8\n\t" \
165 "or\t%0, $1\n\t" \
166 "li\t%1, 0\n" \
167 ".set\tpop\n" \
168 "10:\n\t" \
169 ".insn\n\t" \
170 ".section\t.fixup,\"ax\"\n\t" \
171 "11:\tli\t%1, %3\n\t" \
172 "j\t10b\n\t" \
173 ".previous\n\t" \
174 ".section\t__ex_table,\"a\"\n\t" \
175 STR(PTR)"\t1b, 11b\n\t" \
176 STR(PTR)"\t2b, 11b\n\t" \
177 STR(PTR)"\t3b, 11b\n\t" \
178 STR(PTR)"\t4b, 11b\n\t" \
179 ".previous" \
180 : "=&r" (value), "=r" (res) \
181 : "r" (addr), "i" (-EFAULT));
182#endif /* CONFIG_CPU_MIPSR6 */
149 183
150#define LoadHWU(addr, value, res) \ 184#define LoadHWU(addr, value, res) \
151 __asm__ __volatile__ ( \ 185 __asm__ __volatile__ ( \
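
Since R6 removed lwl/lwr, the replacement macro above assembles the word from four byte loads. Its big-endian arithmetic reduces to the following C sketch (illustrative name; the real macro also registers each load in the exception table so a fault is reported as -EFAULT through the fixup):

    static inline int load_w_be(const unsigned char *p, long *out)
    {
            long v = (signed char)p[0];  /* lb: sign enters with byte 0 */

            v = (v << 8) | p[1];         /* lbu + sll + or, repeated */
            v = (v << 8) | p[2];
            v = (v << 8) | p[3];
            *out = v;                    /* sign-extended 32-bit result */
            return 0;
    }
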
@@ -169,6 +203,7 @@ extern void show_registers(struct pt_regs *regs);
169 : "=&r" (value), "=r" (res) \ 203 : "=&r" (value), "=r" (res) \
170 : "r" (addr), "i" (-EFAULT)); 204 : "r" (addr), "i" (-EFAULT));
171 205
206#ifndef CONFIG_CPU_MIPSR6
172#define LoadWU(addr, value, res) \ 207#define LoadWU(addr, value, res) \
173 __asm__ __volatile__ ( \ 208 __asm__ __volatile__ ( \
174 "1:\t"user_lwl("%0", "(%2)")"\n" \ 209 "1:\t"user_lwl("%0", "(%2)")"\n" \
@@ -206,6 +241,87 @@ extern void show_registers(struct pt_regs *regs);
206 ".previous" \ 241 ".previous" \
207 : "=&r" (value), "=r" (res) \ 242 : "=&r" (value), "=r" (res) \
208 : "r" (addr), "i" (-EFAULT)); 243 : "r" (addr), "i" (-EFAULT));
244#else
 245/* MIPSR6 has no lwl and ldl instructions */
246#define LoadWU(addr, value, res) \
247 __asm__ __volatile__ ( \
248 ".set\tpush\n\t" \
249 ".set\tnoat\n\t" \
250 "1:"user_lbu("%0", "0(%2)")"\n\t" \
251 "2:"user_lbu("$1", "1(%2)")"\n\t" \
252 "sll\t%0, 0x8\n\t" \
253 "or\t%0, $1\n\t" \
254 "3:"user_lbu("$1", "2(%2)")"\n\t" \
255 "sll\t%0, 0x8\n\t" \
256 "or\t%0, $1\n\t" \
257 "4:"user_lbu("$1", "3(%2)")"\n\t" \
258 "sll\t%0, 0x8\n\t" \
259 "or\t%0, $1\n\t" \
260 "li\t%1, 0\n" \
261 ".set\tpop\n" \
262 "10:\n\t" \
263 ".insn\n\t" \
264 ".section\t.fixup,\"ax\"\n\t" \
265 "11:\tli\t%1, %3\n\t" \
266 "j\t10b\n\t" \
267 ".previous\n\t" \
268 ".section\t__ex_table,\"a\"\n\t" \
269 STR(PTR)"\t1b, 11b\n\t" \
270 STR(PTR)"\t2b, 11b\n\t" \
271 STR(PTR)"\t3b, 11b\n\t" \
272 STR(PTR)"\t4b, 11b\n\t" \
273 ".previous" \
274 : "=&r" (value), "=r" (res) \
275 : "r" (addr), "i" (-EFAULT));
276
277#define LoadDW(addr, value, res) \
278 __asm__ __volatile__ ( \
279 ".set\tpush\n\t" \
280 ".set\tnoat\n\t" \
281 "1:lb\t%0, 0(%2)\n\t" \
282 "2:lbu\t $1, 1(%2)\n\t" \
283 "dsll\t%0, 0x8\n\t" \
284 "or\t%0, $1\n\t" \
285 "3:lbu\t$1, 2(%2)\n\t" \
286 "dsll\t%0, 0x8\n\t" \
287 "or\t%0, $1\n\t" \
288 "4:lbu\t$1, 3(%2)\n\t" \
289 "dsll\t%0, 0x8\n\t" \
290 "or\t%0, $1\n\t" \
291 "5:lbu\t$1, 4(%2)\n\t" \
292 "dsll\t%0, 0x8\n\t" \
293 "or\t%0, $1\n\t" \
294 "6:lbu\t$1, 5(%2)\n\t" \
295 "dsll\t%0, 0x8\n\t" \
296 "or\t%0, $1\n\t" \
297 "7:lbu\t$1, 6(%2)\n\t" \
298 "dsll\t%0, 0x8\n\t" \
299 "or\t%0, $1\n\t" \
300 "8:lbu\t$1, 7(%2)\n\t" \
301 "dsll\t%0, 0x8\n\t" \
302 "or\t%0, $1\n\t" \
303 "li\t%1, 0\n" \
304 ".set\tpop\n\t" \
305 "10:\n\t" \
306 ".insn\n\t" \
307 ".section\t.fixup,\"ax\"\n\t" \
308 "11:\tli\t%1, %3\n\t" \
309 "j\t10b\n\t" \
310 ".previous\n\t" \
311 ".section\t__ex_table,\"a\"\n\t" \
312 STR(PTR)"\t1b, 11b\n\t" \
313 STR(PTR)"\t2b, 11b\n\t" \
314 STR(PTR)"\t3b, 11b\n\t" \
315 STR(PTR)"\t4b, 11b\n\t" \
316 STR(PTR)"\t5b, 11b\n\t" \
317 STR(PTR)"\t6b, 11b\n\t" \
318 STR(PTR)"\t7b, 11b\n\t" \
319 STR(PTR)"\t8b, 11b\n\t" \
320 ".previous" \
321 : "=&r" (value), "=r" (res) \
322 : "r" (addr), "i" (-EFAULT));
323#endif /* CONFIG_CPU_MIPSR6 */
324
209 325
210#define StoreHW(addr, value, res) \ 326#define StoreHW(addr, value, res) \
211 __asm__ __volatile__ ( \ 327 __asm__ __volatile__ ( \
@@ -228,6 +344,7 @@ extern void show_registers(struct pt_regs *regs);
228 : "=r" (res) \ 344 : "=r" (res) \
229 : "r" (value), "r" (addr), "i" (-EFAULT)); 345 : "r" (value), "r" (addr), "i" (-EFAULT));
230 346
347#ifndef CONFIG_CPU_MIPSR6
231#define StoreW(addr, value, res) \ 348#define StoreW(addr, value, res) \
232 __asm__ __volatile__ ( \ 349 __asm__ __volatile__ ( \
233 "1:\t"user_swl("%1", "(%2)")"\n" \ 350 "1:\t"user_swl("%1", "(%2)")"\n" \
@@ -263,9 +380,82 @@ extern void show_registers(struct pt_regs *regs);
263 ".previous" \ 380 ".previous" \
264 : "=r" (res) \ 381 : "=r" (res) \
265 : "r" (value), "r" (addr), "i" (-EFAULT)); 382 : "r" (value), "r" (addr), "i" (-EFAULT));
266#endif 383#else
384/* MIPSR6 has no swl and sdl instructions */
385#define StoreW(addr, value, res) \
386 __asm__ __volatile__ ( \
387 ".set\tpush\n\t" \
388 ".set\tnoat\n\t" \
389 "1:"user_sb("%1", "3(%2)")"\n\t" \
390 "srl\t$1, %1, 0x8\n\t" \
391 "2:"user_sb("$1", "2(%2)")"\n\t" \
392 "srl\t$1, $1, 0x8\n\t" \
393 "3:"user_sb("$1", "1(%2)")"\n\t" \
394 "srl\t$1, $1, 0x8\n\t" \
395 "4:"user_sb("$1", "0(%2)")"\n\t" \
396 ".set\tpop\n\t" \
397 "li\t%0, 0\n" \
398 "10:\n\t" \
399 ".insn\n\t" \
400 ".section\t.fixup,\"ax\"\n\t" \
401 "11:\tli\t%0, %3\n\t" \
402 "j\t10b\n\t" \
403 ".previous\n\t" \
404 ".section\t__ex_table,\"a\"\n\t" \
405 STR(PTR)"\t1b, 11b\n\t" \
406 STR(PTR)"\t2b, 11b\n\t" \
407 STR(PTR)"\t3b, 11b\n\t" \
408 STR(PTR)"\t4b, 11b\n\t" \
409 ".previous" \
410 : "=&r" (res) \
411 : "r" (value), "r" (addr), "i" (-EFAULT) \
412 : "memory");
413
414#define StoreDW(addr, value, res) \
415 __asm__ __volatile__ ( \
416 ".set\tpush\n\t" \
417 ".set\tnoat\n\t" \
418 "1:sb\t%1, 7(%2)\n\t" \
419 "dsrl\t$1, %1, 0x8\n\t" \
420 "2:sb\t$1, 6(%2)\n\t" \
421 "dsrl\t$1, $1, 0x8\n\t" \
422 "3:sb\t$1, 5(%2)\n\t" \
423 "dsrl\t$1, $1, 0x8\n\t" \
424 "4:sb\t$1, 4(%2)\n\t" \
425 "dsrl\t$1, $1, 0x8\n\t" \
426 "5:sb\t$1, 3(%2)\n\t" \
427 "dsrl\t$1, $1, 0x8\n\t" \
428 "6:sb\t$1, 2(%2)\n\t" \
429 "dsrl\t$1, $1, 0x8\n\t" \
430 "7:sb\t$1, 1(%2)\n\t" \
431 "dsrl\t$1, $1, 0x8\n\t" \
432 "8:sb\t$1, 0(%2)\n\t" \
433 "dsrl\t$1, $1, 0x8\n\t" \
434 ".set\tpop\n\t" \
435 "li\t%0, 0\n" \
436 "10:\n\t" \
437 ".insn\n\t" \
438 ".section\t.fixup,\"ax\"\n\t" \
439 "11:\tli\t%0, %3\n\t" \
440 "j\t10b\n\t" \
441 ".previous\n\t" \
442 ".section\t__ex_table,\"a\"\n\t" \
443 STR(PTR)"\t1b, 11b\n\t" \
444 STR(PTR)"\t2b, 11b\n\t" \
445 STR(PTR)"\t3b, 11b\n\t" \
446 STR(PTR)"\t4b, 11b\n\t" \
447 STR(PTR)"\t5b, 11b\n\t" \
448 STR(PTR)"\t6b, 11b\n\t" \
449 STR(PTR)"\t7b, 11b\n\t" \
450 STR(PTR)"\t8b, 11b\n\t" \
451 ".previous" \
452 : "=&r" (res) \
453 : "r" (value), "r" (addr), "i" (-EFAULT) \
454 : "memory");
455#endif /* CONFIG_CPU_MIPSR6 */
456
457#else /* __BIG_ENDIAN */
267 458
268#ifdef __LITTLE_ENDIAN
269#define LoadHW(addr, value, res) \ 459#define LoadHW(addr, value, res) \
270 __asm__ __volatile__ (".set\tnoat\n" \ 460 __asm__ __volatile__ (".set\tnoat\n" \
271 "1:\t"user_lb("%0", "1(%2)")"\n" \ 461 "1:\t"user_lb("%0", "1(%2)")"\n" \
@@ -286,6 +476,7 @@ extern void show_registers(struct pt_regs *regs);
286 : "=&r" (value), "=r" (res) \ 476 : "=&r" (value), "=r" (res) \
287 : "r" (addr), "i" (-EFAULT)); 477 : "r" (addr), "i" (-EFAULT));
288 478
479#ifndef CONFIG_CPU_MIPSR6
289#define LoadW(addr, value, res) \ 480#define LoadW(addr, value, res) \
290 __asm__ __volatile__ ( \ 481 __asm__ __volatile__ ( \
291 "1:\t"user_lwl("%0", "3(%2)")"\n" \ 482 "1:\t"user_lwl("%0", "3(%2)")"\n" \
@@ -303,6 +494,40 @@ extern void show_registers(struct pt_regs *regs);
303 ".previous" \ 494 ".previous" \
304 : "=&r" (value), "=r" (res) \ 495 : "=&r" (value), "=r" (res) \
305 : "r" (addr), "i" (-EFAULT)); 496 : "r" (addr), "i" (-EFAULT));
497#else
498/* MIPSR6 has no lwl instruction */
499#define LoadW(addr, value, res) \
500 __asm__ __volatile__ ( \
501 ".set\tpush\n" \
502 ".set\tnoat\n\t" \
503 "1:"user_lb("%0", "3(%2)")"\n\t" \
504 "2:"user_lbu("$1", "2(%2)")"\n\t" \
505 "sll\t%0, 0x8\n\t" \
506 "or\t%0, $1\n\t" \
507 "3:"user_lbu("$1", "1(%2)")"\n\t" \
508 "sll\t%0, 0x8\n\t" \
509 "or\t%0, $1\n\t" \
510 "4:"user_lbu("$1", "0(%2)")"\n\t" \
511 "sll\t%0, 0x8\n\t" \
512 "or\t%0, $1\n\t" \
513 "li\t%1, 0\n" \
514 ".set\tpop\n" \
515 "10:\n\t" \
516 ".insn\n\t" \
517 ".section\t.fixup,\"ax\"\n\t" \
518 "11:\tli\t%1, %3\n\t" \
519 "j\t10b\n\t" \
520 ".previous\n\t" \
521 ".section\t__ex_table,\"a\"\n\t" \
522 STR(PTR)"\t1b, 11b\n\t" \
523 STR(PTR)"\t2b, 11b\n\t" \
524 STR(PTR)"\t3b, 11b\n\t" \
525 STR(PTR)"\t4b, 11b\n\t" \
526 ".previous" \
527 : "=&r" (value), "=r" (res) \
528 : "r" (addr), "i" (-EFAULT));
529#endif /* CONFIG_CPU_MIPSR6 */
530
306 531
307#define LoadHWU(addr, value, res) \ 532#define LoadHWU(addr, value, res) \
308 __asm__ __volatile__ ( \ 533 __asm__ __volatile__ ( \
@@ -326,6 +551,7 @@ extern void show_registers(struct pt_regs *regs);
326 : "=&r" (value), "=r" (res) \ 551 : "=&r" (value), "=r" (res) \
327 : "r" (addr), "i" (-EFAULT)); 552 : "r" (addr), "i" (-EFAULT));
328 553
554#ifndef CONFIG_CPU_MIPSR6
329#define LoadWU(addr, value, res) \ 555#define LoadWU(addr, value, res) \
330 __asm__ __volatile__ ( \ 556 __asm__ __volatile__ ( \
331 "1:\t"user_lwl("%0", "3(%2)")"\n" \ 557 "1:\t"user_lwl("%0", "3(%2)")"\n" \
@@ -363,6 +589,86 @@ extern void show_registers(struct pt_regs *regs);
363 ".previous" \ 589 ".previous" \
364 : "=&r" (value), "=r" (res) \ 590 : "=&r" (value), "=r" (res) \
365 : "r" (addr), "i" (-EFAULT)); 591 : "r" (addr), "i" (-EFAULT));
592#else
 593/* MIPSR6 has no lwl and ldl instructions */
594#define LoadWU(addr, value, res) \
595 __asm__ __volatile__ ( \
596 ".set\tpush\n\t" \
597 ".set\tnoat\n\t" \
598 "1:"user_lbu("%0", "3(%2)")"\n\t" \
599 "2:"user_lbu("$1", "2(%2)")"\n\t" \
600 "sll\t%0, 0x8\n\t" \
601 "or\t%0, $1\n\t" \
602 "3:"user_lbu("$1", "1(%2)")"\n\t" \
603 "sll\t%0, 0x8\n\t" \
604 "or\t%0, $1\n\t" \
605 "4:"user_lbu("$1", "0(%2)")"\n\t" \
606 "sll\t%0, 0x8\n\t" \
607 "or\t%0, $1\n\t" \
608 "li\t%1, 0\n" \
609 ".set\tpop\n" \
610 "10:\n\t" \
611 ".insn\n\t" \
612 ".section\t.fixup,\"ax\"\n\t" \
613 "11:\tli\t%1, %3\n\t" \
614 "j\t10b\n\t" \
615 ".previous\n\t" \
616 ".section\t__ex_table,\"a\"\n\t" \
617 STR(PTR)"\t1b, 11b\n\t" \
618 STR(PTR)"\t2b, 11b\n\t" \
619 STR(PTR)"\t3b, 11b\n\t" \
620 STR(PTR)"\t4b, 11b\n\t" \
621 ".previous" \
622 : "=&r" (value), "=r" (res) \
623 : "r" (addr), "i" (-EFAULT));
624
625#define LoadDW(addr, value, res) \
626 __asm__ __volatile__ ( \
627 ".set\tpush\n\t" \
628 ".set\tnoat\n\t" \
629 "1:lb\t%0, 7(%2)\n\t" \
630 "2:lbu\t$1, 6(%2)\n\t" \
631 "dsll\t%0, 0x8\n\t" \
632 "or\t%0, $1\n\t" \
633 "3:lbu\t$1, 5(%2)\n\t" \
634 "dsll\t%0, 0x8\n\t" \
635 "or\t%0, $1\n\t" \
636 "4:lbu\t$1, 4(%2)\n\t" \
637 "dsll\t%0, 0x8\n\t" \
638 "or\t%0, $1\n\t" \
639 "5:lbu\t$1, 3(%2)\n\t" \
640 "dsll\t%0, 0x8\n\t" \
641 "or\t%0, $1\n\t" \
642 "6:lbu\t$1, 2(%2)\n\t" \
643 "dsll\t%0, 0x8\n\t" \
644 "or\t%0, $1\n\t" \
645 "7:lbu\t$1, 1(%2)\n\t" \
646 "dsll\t%0, 0x8\n\t" \
647 "or\t%0, $1\n\t" \
648 "8:lbu\t$1, 0(%2)\n\t" \
649 "dsll\t%0, 0x8\n\t" \
650 "or\t%0, $1\n\t" \
651 "li\t%1, 0\n" \
652 ".set\tpop\n\t" \
653 "10:\n\t" \
654 ".insn\n\t" \
655 ".section\t.fixup,\"ax\"\n\t" \
656 "11:\tli\t%1, %3\n\t" \
657 "j\t10b\n\t" \
658 ".previous\n\t" \
659 ".section\t__ex_table,\"a\"\n\t" \
660 STR(PTR)"\t1b, 11b\n\t" \
661 STR(PTR)"\t2b, 11b\n\t" \
662 STR(PTR)"\t3b, 11b\n\t" \
663 STR(PTR)"\t4b, 11b\n\t" \
664 STR(PTR)"\t5b, 11b\n\t" \
665 STR(PTR)"\t6b, 11b\n\t" \
666 STR(PTR)"\t7b, 11b\n\t" \
667 STR(PTR)"\t8b, 11b\n\t" \
668 ".previous" \
669 : "=&r" (value), "=r" (res) \
670 : "r" (addr), "i" (-EFAULT));
671#endif /* CONFIG_CPU_MIPSR6 */
366 672
367#define StoreHW(addr, value, res) \ 673#define StoreHW(addr, value, res) \
368 __asm__ __volatile__ ( \ 674 __asm__ __volatile__ ( \
@@ -384,7 +690,7 @@ extern void show_registers(struct pt_regs *regs);
384 ".previous" \ 690 ".previous" \
385 : "=r" (res) \ 691 : "=r" (res) \
386 : "r" (value), "r" (addr), "i" (-EFAULT)); 692 : "r" (value), "r" (addr), "i" (-EFAULT));
387 693#ifndef CONFIG_CPU_MIPSR6
388#define StoreW(addr, value, res) \ 694#define StoreW(addr, value, res) \
389 __asm__ __volatile__ ( \ 695 __asm__ __volatile__ ( \
390 "1:\t"user_swl("%1", "3(%2)")"\n" \ 696 "1:\t"user_swl("%1", "3(%2)")"\n" \
@@ -420,6 +726,79 @@ extern void show_registers(struct pt_regs *regs);
420 ".previous" \ 726 ".previous" \
421 : "=r" (res) \ 727 : "=r" (res) \
422 : "r" (value), "r" (addr), "i" (-EFAULT)); 728 : "r" (value), "r" (addr), "i" (-EFAULT));
729#else
730/* MIPSR6 has no swl and sdl instructions */
731#define StoreW(addr, value, res) \
732 __asm__ __volatile__ ( \
733 ".set\tpush\n\t" \
734 ".set\tnoat\n\t" \
735 "1:"user_sb("%1", "0(%2)")"\n\t" \
736 "srl\t$1, %1, 0x8\n\t" \
737 "2:"user_sb("$1", "1(%2)")"\n\t" \
738 "srl\t$1, $1, 0x8\n\t" \
739 "3:"user_sb("$1", "2(%2)")"\n\t" \
740 "srl\t$1, $1, 0x8\n\t" \
741 "4:"user_sb("$1", "3(%2)")"\n\t" \
742 ".set\tpop\n\t" \
743 "li\t%0, 0\n" \
744 "10:\n\t" \
745 ".insn\n\t" \
746 ".section\t.fixup,\"ax\"\n\t" \
747 "11:\tli\t%0, %3\n\t" \
748 "j\t10b\n\t" \
749 ".previous\n\t" \
750 ".section\t__ex_table,\"a\"\n\t" \
751 STR(PTR)"\t1b, 11b\n\t" \
752 STR(PTR)"\t2b, 11b\n\t" \
753 STR(PTR)"\t3b, 11b\n\t" \
754 STR(PTR)"\t4b, 11b\n\t" \
755 ".previous" \
756 : "=&r" (res) \
757 : "r" (value), "r" (addr), "i" (-EFAULT) \
758 : "memory");
759
760#define StoreDW(addr, value, res) \
761 __asm__ __volatile__ ( \
762 ".set\tpush\n\t" \
763 ".set\tnoat\n\t" \
764 "1:sb\t%1, 0(%2)\n\t" \
765 "dsrl\t$1, %1, 0x8\n\t" \
766 "2:sb\t$1, 1(%2)\n\t" \
767 "dsrl\t$1, $1, 0x8\n\t" \
768 "3:sb\t$1, 2(%2)\n\t" \
769 "dsrl\t$1, $1, 0x8\n\t" \
770 "4:sb\t$1, 3(%2)\n\t" \
771 "dsrl\t$1, $1, 0x8\n\t" \
772 "5:sb\t$1, 4(%2)\n\t" \
773 "dsrl\t$1, $1, 0x8\n\t" \
774 "6:sb\t$1, 5(%2)\n\t" \
775 "dsrl\t$1, $1, 0x8\n\t" \
776 "7:sb\t$1, 6(%2)\n\t" \
777 "dsrl\t$1, $1, 0x8\n\t" \
778 "8:sb\t$1, 7(%2)\n\t" \
779 "dsrl\t$1, $1, 0x8\n\t" \
780 ".set\tpop\n\t" \
781 "li\t%0, 0\n" \
782 "10:\n\t" \
783 ".insn\n\t" \
784 ".section\t.fixup,\"ax\"\n\t" \
785 "11:\tli\t%0, %3\n\t" \
786 "j\t10b\n\t" \
787 ".previous\n\t" \
788 ".section\t__ex_table,\"a\"\n\t" \
789 STR(PTR)"\t1b, 11b\n\t" \
790 STR(PTR)"\t2b, 11b\n\t" \
791 STR(PTR)"\t3b, 11b\n\t" \
792 STR(PTR)"\t4b, 11b\n\t" \
793 STR(PTR)"\t5b, 11b\n\t" \
794 STR(PTR)"\t6b, 11b\n\t" \
795 STR(PTR)"\t7b, 11b\n\t" \
796 STR(PTR)"\t8b, 11b\n\t" \
797 ".previous" \
798 : "=&r" (res) \
799 : "r" (value), "r" (addr), "i" (-EFAULT) \
800 : "memory");
801#endif /* CONFIG_CPU_MIPSR6 */
423#endif 802#endif
424 803
425static void emulate_load_store_insn(struct pt_regs *regs, 804static void emulate_load_store_insn(struct pt_regs *regs,
@@ -703,10 +1082,13 @@ static void emulate_load_store_insn(struct pt_regs *regs,
703 break; 1082 break;
704 return; 1083 return;
705 1084
1085#ifndef CONFIG_CPU_MIPSR6
706 /* 1086 /*
707 * COP2 is available to implementor for application specific use. 1087 * COP2 is available to implementor for application specific use.
708 * It's up to applications to register a notifier chain and do 1088 * It's up to applications to register a notifier chain and do
709 * whatever they have to do, including possible sending of signals. 1089 * whatever they have to do, including possible sending of signals.
1090 *
1091 * This instruction has been reallocated in Release 6
710 */ 1092 */
711 case lwc2_op: 1093 case lwc2_op:
712 cu2_notifier_call_chain(CU2_LWC2_OP, regs); 1094 cu2_notifier_call_chain(CU2_LWC2_OP, regs);
@@ -723,7 +1105,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
723 case sdc2_op: 1105 case sdc2_op:
724 cu2_notifier_call_chain(CU2_SDC2_OP, regs); 1106 cu2_notifier_call_chain(CU2_SDC2_OP, regs);
725 break; 1107 break;
726 1108#endif
727 default: 1109 default:
728 /* 1110 /*
 729 * Pheeee... We encountered an as yet unknown instruction or 1111 * Pheeee... We encountered an as yet unknown instruction or
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index eeddc58802e1..1e9e900cd3c3 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -8,6 +8,7 @@ lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \
8 8
9obj-y += iomap.o 9obj-y += iomap.o
10obj-$(CONFIG_PCI) += iomap-pci.o 10obj-$(CONFIG_PCI) += iomap-pci.o
11lib-$(CONFIG_GENERIC_CSUM) := $(filter-out csum_partial.o, $(lib-y))
11 12
12obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o 13obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o
13obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o 14obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 5d3238af9b5c..9245e1705e69 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -293,9 +293,14 @@
293 and t0, src, ADDRMASK 293 and t0, src, ADDRMASK
294 PREFS( 0, 2*32(src) ) 294 PREFS( 0, 2*32(src) )
295 PREFD( 1, 2*32(dst) ) 295 PREFD( 1, 2*32(dst) )
296#ifndef CONFIG_CPU_MIPSR6
296 bnez t1, .Ldst_unaligned\@ 297 bnez t1, .Ldst_unaligned\@
297 nop 298 nop
298 bnez t0, .Lsrc_unaligned_dst_aligned\@ 299 bnez t0, .Lsrc_unaligned_dst_aligned\@
300#else
301 or t0, t0, t1
302 bnez t0, .Lcopy_unaligned_bytes\@
303#endif
299 /* 304 /*
300 * use delay slot for fall-through 305 * use delay slot for fall-through
301 * src and dst are aligned; need to compute rem 306 * src and dst are aligned; need to compute rem
@@ -376,6 +381,7 @@
376 bne rem, len, 1b 381 bne rem, len, 1b
377 .set noreorder 382 .set noreorder
378 383
384#ifndef CONFIG_CPU_MIPSR6
379 /* 385 /*
380 * src and dst are aligned, need to copy rem bytes (rem < NBYTES) 386 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
381 * A loop would do only a byte at a time with possible branch 387 * A loop would do only a byte at a time with possible branch
@@ -477,6 +483,7 @@
477 bne len, rem, 1b 483 bne len, rem, 1b
478 .set noreorder 484 .set noreorder
479 485
486#endif /* !CONFIG_CPU_MIPSR6 */
480.Lcopy_bytes_checklen\@: 487.Lcopy_bytes_checklen\@:
481 beqz len, .Ldone\@ 488 beqz len, .Ldone\@
482 nop 489 nop
@@ -504,6 +511,22 @@
504.Ldone\@: 511.Ldone\@:
505 jr ra 512 jr ra
506 nop 513 nop
514
515#ifdef CONFIG_CPU_MIPSR6
516.Lcopy_unaligned_bytes\@:
5171:
518 COPY_BYTE(0)
519 COPY_BYTE(1)
520 COPY_BYTE(2)
521 COPY_BYTE(3)
522 COPY_BYTE(4)
523 COPY_BYTE(5)
524 COPY_BYTE(6)
525 COPY_BYTE(7)
526 ADD src, src, 8
527 b 1b
528 ADD dst, dst, 8
529#endif /* CONFIG_CPU_MIPSR6 */
507 .if __memcpy == 1 530 .if __memcpy == 1
508 END(memcpy) 531 END(memcpy)
509 .set __memcpy, 0 532 .set __memcpy, 0
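
On R6 both misaligned cases funnel into .Lcopy_unaligned_bytes: an unrolled byte loop replaces the lwl/lwr word-steering paths. Functionally it behaves like this C sketch (illustrative; the real COPY_BYTE macro also checks the remaining length and takes the fault-fixup exits):

    #include <stddef.h>

    static void copy_unaligned_bytes(unsigned char *dst,
                                     const unsigned char *src, size_t len)
    {
            while (len >= 8) {          /* COPY_BYTE(0..7), then advance */
                    dst[0] = src[0]; dst[1] = src[1];
                    dst[2] = src[2]; dst[3] = src[3];
                    dst[4] = src[4]; dst[5] = src[5];
                    dst[6] = src[6]; dst[7] = src[7];
                    src += 8; dst += 8; len -= 8;
            }
            while (len--)
                    *dst++ = *src++;
    }
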
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index c8fe6b1968fb..b8e63fd00375 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -111,6 +111,7 @@
111 .set at 111 .set at
112#endif 112#endif
113 113
114#ifndef CONFIG_CPU_MIPSR6
114 R10KCBARRIER(0(ra)) 115 R10KCBARRIER(0(ra))
115#ifdef __MIPSEB__ 116#ifdef __MIPSEB__
116 EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ 117 EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
@@ -120,6 +121,30 @@
120 PTR_SUBU a0, t0 /* long align ptr */ 121 PTR_SUBU a0, t0 /* long align ptr */
121 PTR_ADDU a2, t0 /* correct size */ 122 PTR_ADDU a2, t0 /* correct size */
122 123
124#else /* CONFIG_CPU_MIPSR6 */
125#define STORE_BYTE(N) \
126 EX(sb, a1, N(a0), .Lbyte_fixup\@); \
127 beqz t0, 0f; \
128 PTR_ADDU t0, 1;
129
130 PTR_ADDU a2, t0 /* correct size */
131 PTR_ADDU t0, 1
132 STORE_BYTE(0)
133 STORE_BYTE(1)
134#if LONGSIZE == 4
135 EX(sb, a1, 2(a0), .Lbyte_fixup\@)
136#else
137 STORE_BYTE(2)
138 STORE_BYTE(3)
139 STORE_BYTE(4)
140 STORE_BYTE(5)
141 EX(sb, a1, 6(a0), .Lbyte_fixup\@)
142#endif
1430:
144 ori a0, STORMASK
145 xori a0, STORMASK
146 PTR_ADDIU a0, STORSIZE
147#endif /* CONFIG_CPU_MIPSR6 */
1231: ori t1, a2, 0x3f /* # of full blocks */ 1481: ori t1, a2, 0x3f /* # of full blocks */
124 xori t1, 0x3f 149 xori t1, 0x3f
125 beqz t1, .Lmemset_partial\@ /* no block to fill */ 150 beqz t1, .Lmemset_partial\@ /* no block to fill */
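
With swl/sdl unavailable, the R6 memset prologue aligns the destination by storing the leading bytes one at a time through STORE_BYTE; the ori/xori pair then masks the low bits of a0 before PTR_ADDIU steps to the next long boundary. The head alignment amounts to this C sketch (illustrative helper; the aligned block stores then continue as before):

    #include <stddef.h>

    static unsigned char *memset_align_head(unsigned char *p, int c,
                                            size_t *n)
    {
            while (*n && ((unsigned long)p & (sizeof(long) - 1))) {
                    *p++ = (unsigned char)c;   /* one EX(sb, ...) each */
                    (*n)--;
            }
            return p;   /* long-aligned; word/dword stores resume here */
    }
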
@@ -159,6 +184,7 @@
159 andi a2, STORMASK /* At most one long to go */ 184 andi a2, STORMASK /* At most one long to go */
160 185
161 beqz a2, 1f 186 beqz a2, 1f
187#ifndef CONFIG_CPU_MIPSR6
162 PTR_ADDU a0, a2 /* What's left */ 188 PTR_ADDU a0, a2 /* What's left */
163 R10KCBARRIER(0(ra)) 189 R10KCBARRIER(0(ra))
164#ifdef __MIPSEB__ 190#ifdef __MIPSEB__
@@ -166,6 +192,22 @@
166#else 192#else
167 EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@) 193 EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@)
168#endif 194#endif
195#else
196 PTR_SUBU t0, $0, a2
197 PTR_ADDIU t0, 1
198 STORE_BYTE(0)
199 STORE_BYTE(1)
200#if LONGSIZE == 4
201 EX(sb, a1, 2(a0), .Lbyte_fixup\@)
202#else
203 STORE_BYTE(2)
204 STORE_BYTE(3)
205 STORE_BYTE(4)
206 STORE_BYTE(5)
207 EX(sb, a1, 6(a0), .Lbyte_fixup\@)
208#endif
2090:
210#endif
1691: jr ra 2111: jr ra
170 move a2, zero 212 move a2, zero
171 213
@@ -186,6 +228,11 @@
186 .hidden __memset 228 .hidden __memset
187 .endif 229 .endif
188 230
231.Lbyte_fixup\@:
232 PTR_SUBU a2, $0, t0
233 jr ra
234 PTR_ADDIU a2, 1
235
189.Lfirst_fixup\@: 236.Lfirst_fixup\@:
190 jr ra 237 jr ra
191 nop 238 nop
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index be777d9a3f85..272af8ac2425 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -15,7 +15,7 @@
15#include <linux/export.h> 15#include <linux/export.h>
16#include <linux/stringify.h> 16#include <linux/stringify.h>
17 17
18#ifndef CONFIG_CPU_MIPSR2 18#if !defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)
19 19
20/* 20/*
21 * For cli() we have to insert nops to make sure that the new value 21 * For cli() we have to insert nops to make sure that the new value
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 9dfcd7fc1bc3..b30bf65c7d7d 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -48,6 +48,7 @@
48#include <asm/processor.h> 48#include <asm/processor.h>
49#include <asm/fpu_emulator.h> 49#include <asm/fpu_emulator.h>
50#include <asm/fpu.h> 50#include <asm/fpu.h>
51#include <asm/mips-r2-to-r6-emul.h>
51 52
52#include "ieee754.h" 53#include "ieee754.h"
53 54
@@ -68,7 +69,7 @@ static int fpux_emu(struct pt_regs *,
68#define modeindex(v) ((v) & FPU_CSR_RM) 69#define modeindex(v) ((v) & FPU_CSR_RM)
69 70
70/* convert condition code register number to csr bit */ 71/* convert condition code register number to csr bit */
71static const unsigned int fpucondbit[8] = { 72const unsigned int fpucondbit[8] = {
72 FPU_CSR_COND0, 73 FPU_CSR_COND0,
73 FPU_CSR_COND1, 74 FPU_CSR_COND1,
74 FPU_CSR_COND2, 75 FPU_CSR_COND2,
@@ -448,6 +449,9 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
448 dec_insn.next_pc_inc; 449 dec_insn.next_pc_inc;
449 /* Fall through */ 450 /* Fall through */
450 case jr_op: 451 case jr_op:
452 /* For R6, JR already emulated in jalr_op */
453 if (NO_R6EMU && insn.r_format.opcode == jr_op)
454 break;
451 *contpc = regs->regs[insn.r_format.rs]; 455 *contpc = regs->regs[insn.r_format.rs];
452 return 1; 456 return 1;
453 } 457 }
@@ -456,12 +460,18 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
456 switch (insn.i_format.rt) { 460 switch (insn.i_format.rt) {
457 case bltzal_op: 461 case bltzal_op:
458 case bltzall_op: 462 case bltzall_op:
463 if (NO_R6EMU && (insn.i_format.rs ||
464 insn.i_format.rt == bltzall_op))
465 break;
466
459 regs->regs[31] = regs->cp0_epc + 467 regs->regs[31] = regs->cp0_epc +
460 dec_insn.pc_inc + 468 dec_insn.pc_inc +
461 dec_insn.next_pc_inc; 469 dec_insn.next_pc_inc;
462 /* Fall through */ 470 /* Fall through */
463 case bltz_op:
464 case bltzl_op: 471 case bltzl_op:
472 if (NO_R6EMU)
473 break;
474 case bltz_op:
465 if ((long)regs->regs[insn.i_format.rs] < 0) 475 if ((long)regs->regs[insn.i_format.rs] < 0)
466 *contpc = regs->cp0_epc + 476 *contpc = regs->cp0_epc +
467 dec_insn.pc_inc + 477 dec_insn.pc_inc +
@@ -473,12 +483,18 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
473 return 1; 483 return 1;
474 case bgezal_op: 484 case bgezal_op:
475 case bgezall_op: 485 case bgezall_op:
486 if (NO_R6EMU && (insn.i_format.rs ||
487 insn.i_format.rt == bgezall_op))
488 break;
489
476 regs->regs[31] = regs->cp0_epc + 490 regs->regs[31] = regs->cp0_epc +
477 dec_insn.pc_inc + 491 dec_insn.pc_inc +
478 dec_insn.next_pc_inc; 492 dec_insn.next_pc_inc;
479 /* Fall through */ 493 /* Fall through */
480 case bgez_op:
481 case bgezl_op: 494 case bgezl_op:
495 if (NO_R6EMU)
496 break;
497 case bgez_op:
482 if ((long)regs->regs[insn.i_format.rs] >= 0) 498 if ((long)regs->regs[insn.i_format.rs] >= 0)
483 *contpc = regs->cp0_epc + 499 *contpc = regs->cp0_epc +
484 dec_insn.pc_inc + 500 dec_insn.pc_inc +
@@ -505,8 +521,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
505 /* Set microMIPS mode bit: XOR for jalx. */ 521 /* Set microMIPS mode bit: XOR for jalx. */
506 *contpc ^= bit; 522 *contpc ^= bit;
507 return 1; 523 return 1;
508 case beq_op:
509 case beql_op: 524 case beql_op:
525 if (NO_R6EMU)
526 break;
527 case beq_op:
510 if (regs->regs[insn.i_format.rs] == 528 if (regs->regs[insn.i_format.rs] ==
511 regs->regs[insn.i_format.rt]) 529 regs->regs[insn.i_format.rt])
512 *contpc = regs->cp0_epc + 530 *contpc = regs->cp0_epc +
@@ -517,8 +535,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
517 dec_insn.pc_inc + 535 dec_insn.pc_inc +
518 dec_insn.next_pc_inc; 536 dec_insn.next_pc_inc;
519 return 1; 537 return 1;
520 case bne_op:
521 case bnel_op: 538 case bnel_op:
539 if (NO_R6EMU)
540 break;
541 case bne_op:
522 if (regs->regs[insn.i_format.rs] != 542 if (regs->regs[insn.i_format.rs] !=
523 regs->regs[insn.i_format.rt]) 543 regs->regs[insn.i_format.rt])
524 *contpc = regs->cp0_epc + 544 *contpc = regs->cp0_epc +
@@ -529,8 +549,34 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
529 dec_insn.pc_inc + 549 dec_insn.pc_inc +
530 dec_insn.next_pc_inc; 550 dec_insn.next_pc_inc;
531 return 1; 551 return 1;
532 case blez_op:
533 case blezl_op: 552 case blezl_op:
553 if (NO_R6EMU)
554 break;
555 case blez_op:
556
557 /*
558 * Compact branches for R6 for the
559 * blez and blezl opcodes.
560 * BLEZ | rs = 0 | rt != 0 == BLEZALC
561 * BLEZ | rs = rt != 0 == BGEZALC
562 * BLEZ | rs != 0 | rt != 0 == BGEUC
563 * BLEZL | rs = 0 | rt != 0 == BLEZC
564 * BLEZL | rs = rt != 0 == BGEZC
565 * BLEZL | rs != 0 | rt != 0 == BGEC
566 *
567 * For real BLEZ{,L}, rt is always 0.
568 */
569 if (cpu_has_mips_r6 && insn.i_format.rt) {
570 if ((insn.i_format.opcode == blez_op) &&
571 ((!insn.i_format.rs && insn.i_format.rt) ||
572 (insn.i_format.rs == insn.i_format.rt)))
573 regs->regs[31] = regs->cp0_epc +
574 dec_insn.pc_inc;
575 *contpc = regs->cp0_epc + dec_insn.pc_inc +
576 dec_insn.next_pc_inc;
577
578 return 1;
579 }
534 if ((long)regs->regs[insn.i_format.rs] <= 0) 580 if ((long)regs->regs[insn.i_format.rs] <= 0)
535 *contpc = regs->cp0_epc + 581 *contpc = regs->cp0_epc +
536 dec_insn.pc_inc + 582 dec_insn.pc_inc +
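
The rt-based test above exploits an encoding invariant: a genuine BLEZ/BLEZL always encodes rt == 0, so on an R6 CPU a non-zero rt identifies one of the reallocated compact branches, and only the BLEZ-opcode link forms (BLEZALC, BGEZALC) write the return address into $31. As a C sketch of that predicate (illustrative helper, not a kernel function):

    /* nonzero when an R6 compact branch in the BLEZ slot also links */
    static int blez_compact_links(int opcode_is_blez,
                                  unsigned int rs, unsigned int rt)
    {
            if (!rt)
                    return 0;           /* plain BLEZ/BLEZL */
            return opcode_is_blez && (rs == 0 || rs == rt);
    }
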
@@ -540,8 +586,35 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
540 dec_insn.pc_inc + 586 dec_insn.pc_inc +
541 dec_insn.next_pc_inc; 587 dec_insn.next_pc_inc;
542 return 1; 588 return 1;
543 case bgtz_op:
544 case bgtzl_op: 589 case bgtzl_op:
590 if (NO_R6EMU)
591 break;
592 case bgtz_op:
593 /*
594 * Compact branches for R6 for the
595 * bgtz and bgtzl opcodes.
596 * BGTZ | rs = 0 | rt != 0 == BGTZALC
597 * BGTZ | rs = rt != 0 == BLTZALC
598 * BGTZ | rs != 0 | rt != 0 == BLTUC
599 * BGTZL | rs = 0 | rt != 0 == BGTZC
600 * BGTZL | rs = rt != 0 == BLTZC
601 * BGTZL | rs != 0 | rt != 0 == BLTC
602 *
 603 * *ZALC variants for BGTZ when rt != 0
 604 * For real BGTZ{,L}, rt is always 0.
605 */
606 if (cpu_has_mips_r6 && insn.i_format.rt) {
 607 if ((insn.i_format.opcode == bgtz_op) &&
608 ((!insn.i_format.rs && insn.i_format.rt) ||
609 (insn.i_format.rs == insn.i_format.rt)))
610 regs->regs[31] = regs->cp0_epc +
611 dec_insn.pc_inc;
612 *contpc = regs->cp0_epc + dec_insn.pc_inc +
613 dec_insn.next_pc_inc;
614
615 return 1;
616 }
617
545 if ((long)regs->regs[insn.i_format.rs] > 0) 618 if ((long)regs->regs[insn.i_format.rs] > 0)
546 *contpc = regs->cp0_epc + 619 *contpc = regs->cp0_epc +
547 dec_insn.pc_inc + 620 dec_insn.pc_inc +
@@ -551,6 +624,16 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
551 dec_insn.pc_inc + 624 dec_insn.pc_inc +
552 dec_insn.next_pc_inc; 625 dec_insn.next_pc_inc;
553 return 1; 626 return 1;
627 case cbcond0_op:
628 case cbcond1_op:
629 if (!cpu_has_mips_r6)
630 break;
631 if (insn.i_format.rt && !insn.i_format.rs)
632 regs->regs[31] = regs->cp0_epc + 4;
633 *contpc = regs->cp0_epc + dec_insn.pc_inc +
634 dec_insn.next_pc_inc;
635
636 return 1;
554#ifdef CONFIG_CPU_CAVIUM_OCTEON 637#ifdef CONFIG_CPU_CAVIUM_OCTEON
555 case lwc2_op: /* This is bbit0 on Octeon */ 638 case lwc2_op: /* This is bbit0 on Octeon */
556 if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0) 639 if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
@@ -576,9 +659,73 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
576 else 659 else
577 *contpc = regs->cp0_epc + 8; 660 *contpc = regs->cp0_epc + 8;
578 return 1; 661 return 1;
662#else
663 case bc6_op:
664 /*
665 * Only valid for MIPS R6 but we can still end up
 666 * here from a broken userland so just tell the emulator
667 * this is not a branch and let it break later on.
668 */
669 if (!cpu_has_mips_r6)
670 break;
671 *contpc = regs->cp0_epc + dec_insn.pc_inc +
672 dec_insn.next_pc_inc;
673
674 return 1;
675 case balc6_op:
676 if (!cpu_has_mips_r6)
677 break;
678 regs->regs[31] = regs->cp0_epc + 4;
679 *contpc = regs->cp0_epc + dec_insn.pc_inc +
680 dec_insn.next_pc_inc;
681
682 return 1;
683 case beqzcjic_op:
684 if (!cpu_has_mips_r6)
685 break;
686 *contpc = regs->cp0_epc + dec_insn.pc_inc +
687 dec_insn.next_pc_inc;
688
689 return 1;
690 case bnezcjialc_op:
691 if (!cpu_has_mips_r6)
692 break;
693 if (!insn.i_format.rs)
694 regs->regs[31] = regs->cp0_epc + 4;
695 *contpc = regs->cp0_epc + dec_insn.pc_inc +
696 dec_insn.next_pc_inc;
697
698 return 1;
579#endif 699#endif
580 case cop0_op: 700 case cop0_op:
581 case cop1_op: 701 case cop1_op:
702 /* Need to check for R6 bc1nez and bc1eqz branches */
703 if (cpu_has_mips_r6 &&
704 ((insn.i_format.rs == bc1eqz_op) ||
705 (insn.i_format.rs == bc1nez_op))) {
706 bit = 0;
707 switch (insn.i_format.rs) {
708 case bc1eqz_op:
709 if (get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1)
710 bit = 1;
711 break;
712 case bc1nez_op:
713 if (!(get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1))
714 bit = 1;
715 break;
716 }
717 if (bit)
718 *contpc = regs->cp0_epc +
719 dec_insn.pc_inc +
720 (insn.i_format.simmediate << 2);
721 else
722 *contpc = regs->cp0_epc +
723 dec_insn.pc_inc +
724 dec_insn.next_pc_inc;
725
726 return 1;
727 }
728 /* R2/R6 compatible cop1 instruction. Fall through */
582 case cop2_op: 729 case cop2_op:
583 case cop1x_op: 730 case cop1x_op:
584 if (insn.i_format.rs == bc_op) { 731 if (insn.i_format.rs == bc_op) {
@@ -1414,14 +1561,14 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1414 * achieve full IEEE-754 accuracy - however this emulator does. 1561 * achieve full IEEE-754 accuracy - however this emulator does.
1415 */ 1562 */
1416 case frsqrt_op: 1563 case frsqrt_op:
1417 if (!cpu_has_mips_4_5_r2) 1564 if (!cpu_has_mips_4_5_r2_r6)
1418 return SIGILL; 1565 return SIGILL;
1419 1566
1420 handler.u = fpemu_sp_rsqrt; 1567 handler.u = fpemu_sp_rsqrt;
1421 goto scopuop; 1568 goto scopuop;
1422 1569
1423 case frecip_op: 1570 case frecip_op:
1424 if (!cpu_has_mips_4_5_r2) 1571 if (!cpu_has_mips_4_5_r2_r6)
1425 return SIGILL; 1572 return SIGILL;
1426 1573
1427 handler.u = fpemu_sp_recip; 1574 handler.u = fpemu_sp_recip;
@@ -1616,13 +1763,13 @@ copcsr:
1616 * achieve full IEEE-754 accuracy - however this emulator does. 1763 * achieve full IEEE-754 accuracy - however this emulator does.
1617 */ 1764 */
1618 case frsqrt_op: 1765 case frsqrt_op:
1619 if (!cpu_has_mips_4_5_r2) 1766 if (!cpu_has_mips_4_5_r2_r6)
1620 return SIGILL; 1767 return SIGILL;
1621 1768
1622 handler.u = fpemu_dp_rsqrt; 1769 handler.u = fpemu_dp_rsqrt;
1623 goto dcopuop; 1770 goto dcopuop;
1624 case frecip_op: 1771 case frecip_op:
1625 if (!cpu_has_mips_4_5_r2) 1772 if (!cpu_has_mips_4_5_r2_r6)
1626 return SIGILL; 1773 return SIGILL;
1627 1774
1628 handler.u = fpemu_dp_recip; 1775 handler.u = fpemu_dp_recip;
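
The compact-branch rewrites in the cp1emu.c hunks above are easier to follow outside diff form. Below is a minimal standalone sketch of the classification table from the blez/blezl comment; the I-format field positions and the blez/blezl opcode values follow the MIPS32 encoding tables, and everything else (names, sample word) is illustrative rather than the kernel's own decoder.

#include <stdint.h>
#include <stdio.h>

/* Standard MIPS I-format field extraction. */
#define OPCODE(x)	(((x) >> 26) & 0x3f)
#define RS(x)		(((x) >> 21) & 0x1f)
#define RT(x)		(((x) >> 16) & 0x1f)

/* Opcode values per the MIPS32 encoding tables. */
#define BLEZ_OP		0x06
#define BLEZL_OP	0x16

/* Classify a blez/blezl-encoded word per the comment's table. */
static const char *classify(uint32_t insn)
{
	unsigned int op = OPCODE(insn), rs = RS(insn), rt = RT(insn);

	if (op != BLEZ_OP && op != BLEZL_OP)
		return "not a blez/blezl encoding";
	if (rt == 0)		/* real BLEZ{,L}: rt is always 0 */
		return op == BLEZ_OP ? "BLEZ" : "BLEZL";
	if (rs == 0)
		return op == BLEZ_OP ? "BLEZALC" : "BLEZC";
	if (rs == rt)
		return op == BLEZ_OP ? "BGEZALC" : "BGEZC";
	return op == BLEZ_OP ? "BGEUC" : "BGEC";
}

int main(void)
{
	/* BLEZ with rs = 0, rt = 3 classifies as BLEZALC. */
	uint32_t word = ((uint32_t)BLEZ_OP << 26) | (3u << 16);

	printf("%s\n", classify(word));
	return 0;
}

Only the *ALC variants link, which is why the emulator code above writes regs->regs[31] before computing *contpc.
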
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index dd261df005c2..3f8059602765 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -794,7 +794,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
794 __asm__ __volatile__ ( 794 __asm__ __volatile__ (
795 ".set push\n\t" 795 ".set push\n\t"
796 ".set noat\n\t" 796 ".set noat\n\t"
797 ".set mips3\n\t" 797 ".set "MIPS_ISA_LEVEL"\n\t"
798#ifdef CONFIG_32BIT 798#ifdef CONFIG_32BIT
799 "la $at,1f\n\t" 799 "la $at,1f\n\t"
800#endif 800#endif
@@ -1255,6 +1255,7 @@ static void probe_pcache(void)
1255 case CPU_P5600: 1255 case CPU_P5600:
1256 case CPU_PROAPTIV: 1256 case CPU_PROAPTIV:
1257 case CPU_M5150: 1257 case CPU_M5150:
1258 case CPU_QEMU_GENERIC:
1258 if (!(read_c0_config7() & MIPS_CONF7_IAR) && 1259 if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
1259 (c->icache.waysize > PAGE_SIZE)) 1260 (c->icache.waysize > PAGE_SIZE))
1260 c->icache.flags |= MIPS_CACHE_ALIASES; 1261 c->icache.flags |= MIPS_CACHE_ALIASES;
@@ -1472,7 +1473,8 @@ static void setup_scache(void)
1472 1473
1473 default: 1474 default:
1474 if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | 1475 if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
1475 MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { 1476 MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
1477 MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) {
1476#ifdef CONFIG_MIPS_CPU_SCACHE 1478#ifdef CONFIG_MIPS_CPU_SCACHE
1477 if (mips_sc_init ()) { 1479 if (mips_sc_init ()) {
1478 scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; 1480 scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
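
The icache-aliasing condition extended to CPU_QEMU_GENERIC above reduces to one comparison: a VIPT cache can alias when each way spans more than a page, because the set index then draws on virtual-address bits above the page offset. A sketch of just that test (the 4 KiB PAGE_SIZE is an assumption for illustration):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	4096u	/* assumed 4 KiB pages */

/* Mirrors the waysize > PAGE_SIZE test in probe_pcache() above. */
static bool icache_may_alias(unsigned int sets, unsigned int linesz)
{
	unsigned int waysize = sets * linesz;

	return waysize > PAGE_SIZE;
}

int main(void)
{
	/* 32 KiB 4-way cache, 32-byte lines: 256 sets, 8 KiB per way. */
	printf("aliases: %d\n", icache_may_alias(256, 32));
	return 0;
}
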
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 70ab5d664332..7ff8637e530d 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -14,6 +14,7 @@
14#include <linux/string.h> 14#include <linux/string.h>
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/ptrace.h> 16#include <linux/ptrace.h>
17#include <linux/ratelimit.h>
17#include <linux/mman.h> 18#include <linux/mman.h>
18#include <linux/mm.h> 19#include <linux/mm.h>
19#include <linux/smp.h> 20#include <linux/smp.h>
@@ -28,6 +29,8 @@
28#include <asm/highmem.h> /* For VMALLOC_END */ 29#include <asm/highmem.h> /* For VMALLOC_END */
29#include <linux/kdebug.h> 30#include <linux/kdebug.h>
30 31
32int show_unhandled_signals = 1;
33
31/* 34/*
32 * This routine handles page faults. It determines the address, 35 * This routine handles page faults. It determines the address,
33 * and the problem, and then passes it off to one of the appropriate 36 * and the problem, and then passes it off to one of the appropriate
@@ -44,6 +47,8 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
44 int fault; 47 int fault;
45 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 48 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
46 49
50 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
51
47#if 0 52#if 0
48 printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(), 53 printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
49 current->comm, current->pid, field, address, write, 54 current->comm, current->pid, field, address, write,
@@ -203,15 +208,21 @@ bad_area_nosemaphore:
203 if (user_mode(regs)) { 208 if (user_mode(regs)) {
204 tsk->thread.cp0_badvaddr = address; 209 tsk->thread.cp0_badvaddr = address;
205 tsk->thread.error_code = write; 210 tsk->thread.error_code = write;
206#if 0 211 if (show_unhandled_signals &&
207 printk("do_page_fault() #2: sending SIGSEGV to %s for " 212 unhandled_signal(tsk, SIGSEGV) &&
208 "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n", 213 __ratelimit(&ratelimit_state)) {
209 tsk->comm, 214 pr_info("\ndo_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx",
210 write ? "write access to" : "read access from", 215 tsk->comm,
211 field, address, 216 write ? "write access to" : "read access from",
212 field, (unsigned long) regs->cp0_epc, 217 field, address);
213 field, (unsigned long) regs->regs[31]); 218 pr_info("epc = %0*lx in", field,
214#endif 219 (unsigned long) regs->cp0_epc);
220 print_vma_addr(" ", regs->cp0_epc);
221 pr_info("ra = %0*lx in", field,
222 (unsigned long) regs->regs[31]);
223 print_vma_addr(" ", regs->regs[31]);
224 pr_info("\n");
225 }
215 info.si_signo = SIGSEGV; 226 info.si_signo = SIGSEGV;
216 info.si_errno = 0; 227 info.si_errno = 0;
217 /* info.si_code has been set above */ 228 /* info.si_code has been set above */
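
DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10) above caps the new SIGSEGV diagnostics at ten messages per five-second window. The following userspace sketch has the same windowed semantics; it is an approximation of __ratelimit(), not the kernel implementation (which also counts the messages it suppressed):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct ratelimit_state {
	int interval;		/* seconds per window (5 above) */
	int burst;		/* messages per window (10 above) */
	int printed;
	time_t begin;		/* 0 = no window open yet */
};

/* Return true if the caller may emit a message now. */
static bool ratelimit_ok(struct ratelimit_state *rs)
{
	time_t now = time(NULL);

	if (!rs->begin || now - rs->begin >= rs->interval) {
		rs->begin = now;	/* open a fresh window */
		rs->printed = 0;
	}
	if (rs->printed < rs->burst) {
		rs->printed++;
		return true;
	}
	return false;		/* suppressed */
}

int main(void)
{
	struct ratelimit_state rs = { .interval = 5, .burst = 10 };
	int i, allowed = 0;

	for (i = 0; i < 25; i++)
		if (ratelimit_ok(&rs))
			allowed++;
	printf("%d of 25 messages allowed\n", allowed);	/* 10 */
	return 0;
}
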
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index b611102e23b5..3f85f921801b 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -72,6 +72,20 @@ static struct uasm_reloc relocs[5];
72#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) 72#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
73#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) 73#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
74 74
75/*
76 * The pref instruction has a limited 9-bit signed offset on R6.
77 * Skip emitting it if the offset does not fit.
78 */
79#define _uasm_i_pref(a, b, c, d) \
80do { \
81 if (cpu_has_mips_r6) { \
82 if (c <= 0xff && c >= -0x100) \
83 uasm_i_pref(a, b, c, d);\
84 } else { \
85 uasm_i_pref(a, b, c, d); \
86 } \
87} while(0)
88
75static int pref_bias_clear_store; 89static int pref_bias_clear_store;
76static int pref_bias_copy_load; 90static int pref_bias_copy_load;
77static int pref_bias_copy_store; 91static int pref_bias_copy_store;
@@ -178,7 +192,15 @@ static void set_prefetch_parameters(void)
178 pref_bias_copy_load = 256; 192 pref_bias_copy_load = 256;
179 pref_bias_copy_store = 128; 193 pref_bias_copy_store = 128;
180 pref_src_mode = Pref_LoadStreamed; 194 pref_src_mode = Pref_LoadStreamed;
181 pref_dst_mode = Pref_PrepareForStore; 195 if (cpu_has_mips_r6)
196 /*
197			 * Hint 30 (Pref_PrepareForStore) has been
198			 * removed from MIPS R6. Use hint 5
199			 * (Pref_StoreStreamed) instead.
200 */
201 pref_dst_mode = Pref_StoreStreamed;
202 else
203 pref_dst_mode = Pref_PrepareForStore;
182 break; 204 break;
183 } 205 }
184 } else { 206 } else {
@@ -214,7 +236,7 @@ static inline void build_clear_pref(u32 **buf, int off)
214 return; 236 return;
215 237
216 if (pref_bias_clear_store) { 238 if (pref_bias_clear_store) {
217 uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off, 239 _uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
218 A0); 240 A0);
219 } else if (cache_line_size == (half_clear_loop_size << 1)) { 241 } else if (cache_line_size == (half_clear_loop_size << 1)) {
220 if (cpu_has_cache_cdex_s) { 242 if (cpu_has_cache_cdex_s) {
@@ -357,7 +379,7 @@ static inline void build_copy_load_pref(u32 **buf, int off)
357 return; 379 return;
358 380
359 if (pref_bias_copy_load) 381 if (pref_bias_copy_load)
360 uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1); 382 _uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
361} 383}
362 384
363static inline void build_copy_store_pref(u32 **buf, int off) 385static inline void build_copy_store_pref(u32 **buf, int off)
@@ -366,7 +388,7 @@ static inline void build_copy_store_pref(u32 **buf, int off)
366 return; 388 return;
367 389
368 if (pref_bias_copy_store) { 390 if (pref_bias_copy_store) {
369 uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off, 391 _uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
370 A0); 392 A0);
371 } else if (cache_line_size == (half_copy_loop_size << 1)) { 393 } else if (cache_line_size == (half_copy_loop_size << 1)) {
372 if (cpu_has_cache_cdex_s) { 394 if (cpu_has_cache_cdex_s) {
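
The range check inside _uasm_i_pref above is the 9-bit signed immediate test: R6 moved pref to an encoding whose offset field only holds [-256, 255]. In isolation:

#include <stdbool.h>
#include <stdio.h>

/* True if off fits the R6 pref's 9-bit signed offset field. */
static bool fits_simm9(int off)
{
	return off >= -0x100 && off <= 0xff;
}

int main(void)
{
	/* Prints "1 1 0": 255 and -256 fit, 256 does not. */
	printf("%d %d %d\n", fits_simm9(255), fits_simm9(-256),
	       fits_simm9(256));
	return 0;
}
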
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 99eb8fabab60..4ceafd13870c 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -81,6 +81,7 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
81 case CPU_PROAPTIV: 81 case CPU_PROAPTIV:
82 case CPU_P5600: 82 case CPU_P5600:
83 case CPU_BMIPS5000: 83 case CPU_BMIPS5000:
84 case CPU_QEMU_GENERIC:
84 if (config2 & (1 << 12)) 85 if (config2 & (1 << 12))
85 return 0; 86 return 0;
86 } 87 }
@@ -104,7 +105,8 @@ static inline int __init mips_sc_probe(void)
104 105
105 /* Ignore anything but MIPSxx processors */ 106 /* Ignore anything but MIPSxx processors */
106 if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | 107 if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
107 MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2))) 108 MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
109 MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)))
108 return 0; 110 return 0;
109 111
110 /* Does this MIPS32/MIPS64 CPU have a config2 register? */ 112 /* Does this MIPS32/MIPS64 CPU have a config2 register? */
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 30639a6e9b8c..b2afa49beab0 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -485,13 +485,11 @@ static void r4k_tlb_configure(void)
485 * Enable the no read, no exec bits, and enable large virtual 485 * Enable the no read, no exec bits, and enable large virtual
486 * address. 486 * address.
487 */ 487 */
488 u32 pg = PG_RIE | PG_XIE;
489#ifdef CONFIG_64BIT 488#ifdef CONFIG_64BIT
490 pg |= PG_ELPA; 489 set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
490#else
491 set_c0_pagegrain(PG_RIE | PG_XIE);
491#endif 492#endif
492 if (cpu_has_rixiex)
493 pg |= PG_IEC;
494 write_c0_pagegrain(pg);
495 } 493 }
496 494
497 temp_tlb_entry = current_cpu_data.tlbsize - 1; 495 temp_tlb_entry = current_cpu_data.tlbsize - 1;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 3978a3d81366..d75ff73a2012 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -501,7 +501,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
501 case tlb_indexed: tlbw = uasm_i_tlbwi; break; 501 case tlb_indexed: tlbw = uasm_i_tlbwi; break;
502 } 502 }
503 503
504 if (cpu_has_mips_r2) { 504 if (cpu_has_mips_r2_exec_hazard) {
505 /* 505 /*
506 * The architecture spec says an ehb is required here, 506 * The architecture spec says an ehb is required here,
507 * but a number of cores do not have the hazard and 507 * but a number of cores do not have the hazard and
@@ -514,6 +514,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
514 case CPU_PROAPTIV: 514 case CPU_PROAPTIV:
515 case CPU_P5600: 515 case CPU_P5600:
516 case CPU_M5150: 516 case CPU_M5150:
517 case CPU_QEMU_GENERIC:
517 break; 518 break;
518 519
519 default: 520 default:
@@ -1952,7 +1953,7 @@ static void build_r4000_tlb_load_handler(void)
1952 1953
1953 switch (current_cpu_type()) { 1954 switch (current_cpu_type()) {
1954 default: 1955 default:
1955 if (cpu_has_mips_r2) { 1956 if (cpu_has_mips_r2_exec_hazard) {
1956 uasm_i_ehb(&p); 1957 uasm_i_ehb(&p);
1957 1958
1958 case CPU_CAVIUM_OCTEON: 1959 case CPU_CAVIUM_OCTEON:
@@ -2019,7 +2020,7 @@ static void build_r4000_tlb_load_handler(void)
2019 2020
2020 switch (current_cpu_type()) { 2021 switch (current_cpu_type()) {
2021 default: 2022 default:
2022 if (cpu_has_mips_r2) { 2023 if (cpu_has_mips_r2_exec_hazard) {
2023 uasm_i_ehb(&p); 2024 uasm_i_ehb(&p);
2024 2025
2025 case CPU_CAVIUM_OCTEON: 2026 case CPU_CAVIUM_OCTEON:
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index 8399ddf03a02..d78178daea4b 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -38,14 +38,6 @@
38 | (e) << RE_SH \ 38 | (e) << RE_SH \
39 | (f) << FUNC_SH) 39 | (f) << FUNC_SH)
40 40
41/* Define these when we are not the ISA the kernel is being compiled with. */
42#ifndef CONFIG_CPU_MICROMIPS
43#define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
44#define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
45#define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
46#define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
47#endif
48
49#include "uasm.c" 41#include "uasm.c"
50 42
51static struct insn insn_table_MM[] = { 43static struct insn insn_table_MM[] = {
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 8e02291cfc0c..b4a837893562 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -38,13 +38,13 @@
38 | (e) << RE_SH \ 38 | (e) << RE_SH \
39 | (f) << FUNC_SH) 39 | (f) << FUNC_SH)
40 40
41/* Define these when we are not the ISA the kernel is being compiled with. */ 41/* This macro sets the non-variable bits of an R6 instruction. */
42#ifdef CONFIG_CPU_MICROMIPS 42#define M6(a, b, c, d, e) \
43#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off) 43 ((a) << OP_SH \
44#define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off) 44 | (b) << RS_SH \
45#define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off) 45 | (c) << RT_SH \
46#define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off) 46 | (d) << SIMM9_SH \
47#endif 47 | (e) << FUNC_SH)
48 48
49#include "uasm.c" 49#include "uasm.c"
50 50
@@ -62,7 +62,11 @@ static struct insn insn_table[] = {
62 { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM }, 62 { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
63 { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM }, 63 { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
64 { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, 64 { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
65#ifndef CONFIG_CPU_MIPSR6
65 { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 66 { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
67#else
68 { insn_cache, M6(cache_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 },
69#endif
66 { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 70 { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
67 { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, 71 { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
68 { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE }, 72 { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
@@ -85,13 +89,22 @@ static struct insn insn_table[] = {
85 { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, 89 { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
86 { insn_jalr, M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD }, 90 { insn_jalr, M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD },
87 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, 91 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
92#ifndef CONFIG_CPU_MIPSR6
88 { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, 93 { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
94#else
95 { insn_jr, M(spec_op, 0, 0, 0, 0, jalr_op), RS },
96#endif
89 { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 97 { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
90 { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 98 { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
91 { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, 99 { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
92 { insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 100 { insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
101#ifndef CONFIG_CPU_MIPSR6
93 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 102 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
94 { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 103 { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
104#else
105 { insn_lld, M6(spec3_op, 0, 0, 0, lld6_op), RS | RT | SIMM9 },
106 { insn_ll, M6(spec3_op, 0, 0, 0, ll6_op), RS | RT | SIMM9 },
107#endif
95 { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, 108 { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
96 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 109 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
97 { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD }, 110 { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
@@ -104,11 +117,20 @@ static struct insn insn_table[] = {
104 { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD}, 117 { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
105 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 118 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
106 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, 119 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
120#ifndef CONFIG_CPU_MIPSR6
107 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 121 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
122#else
123 { insn_pref, M6(spec3_op, 0, 0, 0, pref6_op), RS | RT | SIMM9 },
124#endif
108 { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, 125 { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
109 { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE }, 126 { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
127#ifndef CONFIG_CPU_MIPSR6
110 { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 128 { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
111 { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 129 { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
130#else
131 { insn_scd, M6(spec3_op, 0, 0, 0, scd6_op), RS | RT | SIMM9 },
132 { insn_sc, M6(spec3_op, 0, 0, 0, sc6_op), RS | RT | SIMM9 },
133#endif
112 { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 134 { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
113 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, 135 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
114 { insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD }, 136 { insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD },
@@ -198,6 +220,8 @@ static void build_insn(u32 **buf, enum opcode opc, ...)
198 op |= build_set(va_arg(ap, u32)); 220 op |= build_set(va_arg(ap, u32));
199 if (ip->fields & SCIMM) 221 if (ip->fields & SCIMM)
200 op |= build_scimm(va_arg(ap, u32)); 222 op |= build_scimm(va_arg(ap, u32));
223 if (ip->fields & SIMM9)
224 op |= build_scimm9(va_arg(ap, u32));
201 va_end(ap); 225 va_end(ap);
202 226
203 **buf = op; 227 **buf = op;
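
The new M6() macro above packs the non-variable bits of an R6 instruction from five fields. A standalone sketch: SIMM9_SH and FUNC_SH match the uasm.c definitions in this series, while OP_SH/RS_SH/RT_SH are the standard MIPS positions (their definitions sit outside these hunks, so treat them as assumptions), and the function value below is a placeholder rather than a real R6 minor opcode.

#include <stdint.h>
#include <stdio.h>

#define OP_SH		26	/* standard MIPS field positions */
#define RS_SH		21
#define RT_SH		16
#define SIMM9_SH	7	/* as defined in uasm.c in this series */
#define FUNC_SH		0

#define M6(a, b, c, d, e)					\
	((uint32_t)(a) << OP_SH | (uint32_t)(b) << RS_SH |	\
	 (uint32_t)(c) << RT_SH | (uint32_t)(d) << SIMM9_SH |	\
	 (uint32_t)(e) << FUNC_SH)

int main(void)
{
	/* spec3 major opcode is 0x1f; 0x25 is a placeholder func value. */
	uint32_t tmpl = M6(0x1f, 0, 0, 0, 0x25);

	printf("template = 0x%08x\n", tmpl);	/* 0x7c000025 */
	return 0;
}

build_insn() then ORs the variable RS/RT/SIMM9 fields into such a template, exactly as it already does for the classic M() encodings.
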
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 4adf30284813..319051c34343 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -24,7 +24,8 @@ enum fields {
24 JIMM = 0x080, 24 JIMM = 0x080,
25 FUNC = 0x100, 25 FUNC = 0x100,
26 SET = 0x200, 26 SET = 0x200,
27 SCIMM = 0x400 27 SCIMM = 0x400,
28 SIMM9 = 0x800,
28}; 29};
29 30
30#define OP_MASK 0x3f 31#define OP_MASK 0x3f
@@ -41,6 +42,8 @@ enum fields {
41#define FUNC_SH 0 42#define FUNC_SH 0
42#define SET_MASK 0x7 43#define SET_MASK 0x7
43#define SET_SH 0 44#define SET_SH 0
45#define SIMM9_SH 7
46#define SIMM9_MASK 0x1ff
44 47
45enum opcode { 48enum opcode {
46 insn_invalid, 49 insn_invalid,
@@ -116,6 +119,14 @@ static inline u32 build_scimm(u32 arg)
116 return (arg & SCIMM_MASK) << SCIMM_SH; 119 return (arg & SCIMM_MASK) << SCIMM_SH;
117} 120}
118 121
122static inline u32 build_scimm9(s32 arg)
123{
124 WARN((arg > 0xff || arg < -0x100),
125 KERN_WARNING "Micro-assembler field overflow\n");
126
127 return (arg & SIMM9_MASK) << SIMM9_SH;
128}
129
119static inline u32 build_func(u32 arg) 130static inline u32 build_func(u32 arg)
120{ 131{
121 WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 132 WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n");
@@ -330,7 +341,7 @@ I_u3u1u2(_ldx)
330void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b, 341void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
331 unsigned int c) 342 unsigned int c)
332{ 343{
333 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5) 344 if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR && a <= 24 && a != 5)
334 /* 345 /*
335 * As per erratum Core-14449, replace prefetches 0-4, 346 * As per erratum Core-14449, replace prefetches 0-4,
336 * 6-24 with 'pref 28'. 347 * 6-24 with 'pref 28'.
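
build_scimm9() above masks a signed value into the 9-bit field at bit 7 and WARNs when it is out of range. Here is a sketch of the encode step plus a matching sign-extending decode; the decode helper is an addition for illustration and assumes arithmetic right shift of negative values, which mainstream compilers provide.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SIMM9_SH	7
#define SIMM9_MASK	0x1ff

static uint32_t build_scimm9(int32_t arg)
{
	/* Same range the kernel WARNs outside of: [-0x100, 0xff]. */
	assert(arg <= 0xff && arg >= -0x100);
	return ((uint32_t)arg & SIMM9_MASK) << SIMM9_SH;
}

/* Recover the signed immediate from an encoded word (illustrative). */
static int32_t extract_simm9(uint32_t insn)
{
	uint32_t field = (insn >> SIMM9_SH) & SIMM9_MASK;

	return (int32_t)(field << 23) >> 23;	/* sign-extend 9 bits */
}

int main(void)
{
	assert(extract_simm9(build_scimm9(-42)) == -42);
	printf("round trip ok\n");
	return 0;
}
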
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index ec1dd2491f96..e1d69895fb1d 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -72,7 +72,7 @@ void read_persistent_clock(struct timespec *ts)
72int get_c0_perfcount_int(void) 72int get_c0_perfcount_int(void)
73{ 73{
74 if (gic_present) 74 if (gic_present)
75 return gic_get_c0_compare_int(); 75 return gic_get_c0_perfcount_int();
76 if (cp0_perfcount_irq >= 0) 76 if (cp0_perfcount_irq >= 0)
77 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; 77 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
78 return -1; 78 return -1;
diff --git a/arch/mips/pci/pci-bcm1480.c b/arch/mips/pci/pci-bcm1480.c
index f2355e3e65a1..f97e169393bc 100644
--- a/arch/mips/pci/pci-bcm1480.c
+++ b/arch/mips/pci/pci-bcm1480.c
@@ -173,8 +173,8 @@ static int bcm1480_pcibios_write(struct pci_bus *bus, unsigned int devfn,
173} 173}
174 174
175struct pci_ops bcm1480_pci_ops = { 175struct pci_ops bcm1480_pci_ops = {
176 .read = bcm1480_pcibios_read, 176 .read = bcm1480_pcibios_read,
177 .write = bcm1480_pcibios_write, 177 .write = bcm1480_pcibios_write,
178}; 178};
179 179
180static struct resource bcm1480_mem_resource = { 180static struct resource bcm1480_mem_resource = {
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index bedb72bd3a27..a04af55d89f1 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
327 327
328 328
329static struct pci_ops octeon_pci_ops = { 329static struct pci_ops octeon_pci_ops = {
330 .read = octeon_read_config, 330 .read = octeon_read_config,
331 .write = octeon_write_config, 331 .write = octeon_write_config,
332}; 332};
333 333
334static struct resource octeon_pci_mem_resource = { 334static struct resource octeon_pci_mem_resource = {
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index eb4a17ba4a53..1bb0b2bf8d6e 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
1792} 1792}
1793 1793
1794static struct pci_ops octeon_pcie0_ops = { 1794static struct pci_ops octeon_pcie0_ops = {
1795 .read = octeon_pcie0_read_config, 1795 .read = octeon_pcie0_read_config,
1796 .write = octeon_pcie0_write_config, 1796 .write = octeon_pcie0_write_config,
1797}; 1797};
1798 1798
1799static struct resource octeon_pcie0_mem_resource = { 1799static struct resource octeon_pcie0_mem_resource = {
@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
1813}; 1813};
1814 1814
1815static struct pci_ops octeon_pcie1_ops = { 1815static struct pci_ops octeon_pcie1_ops = {
1816 .read = octeon_pcie1_read_config, 1816 .read = octeon_pcie1_read_config,
1817 .write = octeon_pcie1_write_config, 1817 .write = octeon_pcie1_write_config,
1818}; 1818};
1819 1819
1820static struct resource octeon_pcie1_mem_resource = { 1820static struct resource octeon_pcie1_mem_resource = {
@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
1834}; 1834};
1835 1835
1836static struct pci_ops octeon_dummy_ops = { 1836static struct pci_ops octeon_dummy_ops = {
1837 .read = octeon_dummy_read_config, 1837 .read = octeon_dummy_read_config,
1838 .write = octeon_dummy_write_config, 1838 .write = octeon_dummy_write_config,
1839}; 1839};
1840 1840
1841static struct resource octeon_dummy_mem_resource = { 1841static struct resource octeon_dummy_mem_resource = {
diff --git a/arch/mips/pmcs-msp71xx/Kconfig b/arch/mips/pmcs-msp71xx/Kconfig
index 6073ca456d11..4190093d3053 100644
--- a/arch/mips/pmcs-msp71xx/Kconfig
+++ b/arch/mips/pmcs-msp71xx/Kconfig
@@ -36,14 +36,14 @@ config PMC_MSP7120_FPGA
36endchoice 36endchoice
37 37
38config MSP_HAS_USB 38config MSP_HAS_USB
39 boolean 39 bool
40 depends on PMC_MSP 40 depends on PMC_MSP
41 41
42config MSP_ETH 42config MSP_ETH
43 boolean 43 bool
44 select MSP_HAS_MAC 44 select MSP_HAS_MAC
45 depends on PMC_MSP 45 depends on PMC_MSP
46 46
47config MSP_HAS_MAC 47config MSP_HAS_MAC
48 boolean 48 bool
49 depends on PMC_MSP 49 depends on PMC_MSP
diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c
index 8f1b86d4da84..cdf187600010 100644
--- a/arch/mips/sgi-ip22/ip22-gio.c
+++ b/arch/mips/sgi-ip22/ip22-gio.c
@@ -152,28 +152,6 @@ static int gio_device_remove(struct device *dev)
152 return 0; 152 return 0;
153} 153}
154 154
155static int gio_device_suspend(struct device *dev, pm_message_t state)
156{
157 struct gio_device *gio_dev = to_gio_device(dev);
158 struct gio_driver *drv = to_gio_driver(dev->driver);
159 int error = 0;
160
161 if (dev->driver && drv->suspend)
162 error = drv->suspend(gio_dev, state);
163 return error;
164}
165
166static int gio_device_resume(struct device *dev)
167{
168 struct gio_device *gio_dev = to_gio_device(dev);
169 struct gio_driver *drv = to_gio_driver(dev->driver);
170 int error = 0;
171
172 if (dev->driver && drv->resume)
173 error = drv->resume(gio_dev);
174 return error;
175}
176
177static void gio_device_shutdown(struct device *dev) 155static void gio_device_shutdown(struct device *dev)
178{ 156{
179 struct gio_device *gio_dev = to_gio_device(dev); 157 struct gio_device *gio_dev = to_gio_device(dev);
@@ -400,8 +378,6 @@ static struct bus_type gio_bus_type = {
400 .match = gio_bus_match, 378 .match = gio_bus_match,
401 .probe = gio_device_probe, 379 .probe = gio_device_probe,
402 .remove = gio_device_remove, 380 .remove = gio_device_remove,
403 .suspend = gio_device_suspend,
404 .resume = gio_device_resume,
405 .shutdown = gio_device_shutdown, 381 .shutdown = gio_device_shutdown,
406 .uevent = gio_device_uevent, 382 .uevent = gio_device_uevent,
407}; 383};
diff --git a/arch/mips/sgi-ip27/ip27-reset.c b/arch/mips/sgi-ip27/ip27-reset.c
index ac37e54b3d5e..e44a15d4f573 100644
--- a/arch/mips/sgi-ip27/ip27-reset.c
+++ b/arch/mips/sgi-ip27/ip27-reset.c
@@ -8,6 +8,7 @@
8 * Copyright (C) 1997, 1998, 1999, 2000, 06 by Ralf Baechle 8 * Copyright (C) 1997, 1998, 1999, 2000, 06 by Ralf Baechle
9 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 9 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
10 */ 10 */
11#include <linux/compiler.h>
11#include <linux/kernel.h> 12#include <linux/kernel.h>
12#include <linux/sched.h> 13#include <linux/sched.h>
13#include <linux/timer.h> 14#include <linux/timer.h>
@@ -25,9 +26,9 @@
25#include <asm/sn/gda.h> 26#include <asm/sn/gda.h>
26#include <asm/sn/sn0/hub.h> 27#include <asm/sn/sn0/hub.h>
27 28
28void machine_restart(char *command) __attribute__((noreturn)); 29void machine_restart(char *command) __noreturn;
29void machine_halt(void) __attribute__((noreturn)); 30void machine_halt(void) __noreturn;
30void machine_power_off(void) __attribute__((noreturn)); 31void machine_power_off(void) __noreturn;
31 32
32#define noreturn while(1); /* Silence gcc. */ 33#define noreturn while(1); /* Silence gcc. */
33 34
diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c
index 1f823da4c77b..44b3470a0bbb 100644
--- a/arch/mips/sgi-ip32/ip32-reset.c
+++ b/arch/mips/sgi-ip32/ip32-reset.c
@@ -8,6 +8,7 @@
8 * Copyright (C) 2003 Guido Guenther <agx@sigxcpu.org> 8 * Copyright (C) 2003 Guido Guenther <agx@sigxcpu.org>
9 */ 9 */
10 10
11#include <linux/compiler.h>
11#include <linux/init.h> 12#include <linux/init.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
13#include <linux/sched.h> 14#include <linux/sched.h>
@@ -35,9 +36,9 @@
35static struct timer_list power_timer, blink_timer, debounce_timer; 36static struct timer_list power_timer, blink_timer, debounce_timer;
36static int has_panicked, shuting_down; 37static int has_panicked, shuting_down;
37 38
38static void ip32_machine_restart(char *command) __attribute__((noreturn)); 39static void ip32_machine_restart(char *command) __noreturn;
39static void ip32_machine_halt(void) __attribute__((noreturn)); 40static void ip32_machine_halt(void) __noreturn;
40static void ip32_machine_power_off(void) __attribute__((noreturn)); 41static void ip32_machine_power_off(void) __noreturn;
41 42
42static void ip32_machine_restart(char *cmd) 43static void ip32_machine_restart(char *cmd)
43{ 44{
diff --git a/arch/mn10300/include/asm/pgtable.h b/arch/mn10300/include/asm/pgtable.h
index afab728ab65e..96d3f9deb59c 100644
--- a/arch/mn10300/include/asm/pgtable.h
+++ b/arch/mn10300/include/asm/pgtable.h
@@ -56,7 +56,9 @@ extern void paging_init(void);
56#define PGDIR_SHIFT 22 56#define PGDIR_SHIFT 22
57#define PTRS_PER_PGD 1024 57#define PTRS_PER_PGD 1024
58#define PTRS_PER_PUD 1 /* we don't really have any PUD physically */ 58#define PTRS_PER_PUD 1 /* we don't really have any PUD physically */
59#define __PAGETABLE_PUD_FOLDED
59#define PTRS_PER_PMD 1 /* we don't really have any PMD physically */ 60#define PTRS_PER_PMD 1 /* we don't really have any PMD physically */
61#define __PAGETABLE_PMD_FOLDED
60#define PTRS_PER_PTE 1024 62#define PTRS_PER_PTE 1024
61 63
62#define PGD_SIZE PAGE_SIZE 64#define PGD_SIZE PAGE_SIZE
diff --git a/arch/mn10300/unit-asb2305/pci-iomap.c b/arch/mn10300/unit-asb2305/pci-iomap.c
deleted file mode 100644
index bd65dae17f32..000000000000
--- a/arch/mn10300/unit-asb2305/pci-iomap.c
+++ /dev/null
@@ -1,35 +0,0 @@
1/* ASB2305 PCI I/O mapping handler
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#include <linux/pci.h>
12#include <linux/module.h>
13
14/*
15 * Create a virtual mapping cookie for a PCI BAR (memory or IO)
16 */
17void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
18{
19 resource_size_t start = pci_resource_start(dev, bar);
20 resource_size_t len = pci_resource_len(dev, bar);
21 unsigned long flags = pci_resource_flags(dev, bar);
22
23 if (!len || !start)
24 return NULL;
25
26 if ((flags & IORESOURCE_IO) || (flags & IORESOURCE_MEM)) {
27 if (flags & IORESOURCE_CACHEABLE && !(flags & IORESOURCE_IO))
28 return ioremap(start, len);
29 else
30 return ioremap_nocache(start, len);
31 }
32
33 return NULL;
34}
35EXPORT_SYMBOL(pci_iomap);
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index ab2e7a198a4c..a6bd07ca3d6c 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -192,7 +192,7 @@ struct __large_struct {
192({ \ 192({ \
193 long __gu_err, __gu_val; \ 193 long __gu_err, __gu_val; \
194 __get_user_size(__gu_val, (ptr), (size), __gu_err); \ 194 __get_user_size(__gu_val, (ptr), (size), __gu_err); \
195 (x) = (__typeof__(*(ptr)))__gu_val; \ 195 (x) = (__force __typeof__(*(ptr)))__gu_val; \
196 __gu_err; \ 196 __gu_err; \
197}) 197})
198 198
@@ -202,7 +202,7 @@ struct __large_struct {
202 const __typeof__(*(ptr)) * __gu_addr = (ptr); \ 202 const __typeof__(*(ptr)) * __gu_addr = (ptr); \
203 if (access_ok(VERIFY_READ, __gu_addr, size)) \ 203 if (access_ok(VERIFY_READ, __gu_addr, size)) \
204 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ 204 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
205 (x) = (__typeof__(*(ptr)))__gu_val; \ 205 (x) = (__force __typeof__(*(ptr)))__gu_val; \
206 __gu_err; \ 206 __gu_err; \
207}) 207})
208 208
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 91fbb6ee702c..965a0999fc4c 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -148,7 +148,7 @@ endef
148# we require gcc 3.3 or above to compile the kernel 148# we require gcc 3.3 or above to compile the kernel
149archprepare: checkbin 149archprepare: checkbin
150checkbin: 150checkbin:
151 @if test "$(call cc-version)" -lt "0303"; then \ 151 @if test "$(cc-version)" -lt "0303"; then \
152 echo -n "Sorry, GCC v3.3 or above is required to build " ; \ 152 echo -n "Sorry, GCC v3.3 or above is required to build " ; \
153 echo "the kernel." ; \ 153 echo "the kernel." ; \
154 false ; \ 154 false ; \
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 8c966b2270aa..15207b9362bf 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -96,6 +96,7 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
96#if PT_NLEVELS == 3 96#if PT_NLEVELS == 3
97#define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY) 97#define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
98#else 98#else
99#define __PAGETABLE_PMD_FOLDED
99#define BITS_PER_PMD 0 100#define BITS_PER_PMD 0
100#endif 101#endif
101#define PTRS_PER_PMD (1UL << BITS_PER_PMD) 102#define PTRS_PER_PMD (1UL << BITS_PER_PMD)
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 132d9c681d6a..fc502e042438 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -314,7 +314,7 @@ TOUT := .tmp_gas_check
314# - Require gcc 4.0 or above on 64-bit 314# - Require gcc 4.0 or above on 64-bit
315# - gcc-4.2.0 has issues compiling modules on 64-bit 315# - gcc-4.2.0 has issues compiling modules on 64-bit
316checkbin: 316checkbin:
317 @if test "$(call cc-version)" = "0304" ; then \ 317 @if test "$(cc-version)" = "0304" ; then \
318 if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \ 318 if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \
319 echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \ 319 echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \
320 echo 'correctly with gcc-3.4 and your version of binutils.'; \ 320 echo 'correctly with gcc-3.4 and your version of binutils.'; \
@@ -322,13 +322,13 @@ checkbin:
322 false; \ 322 false; \
323 fi ; \ 323 fi ; \
324 fi 324 fi
325 @if test "$(call cc-version)" -lt "0400" \ 325 @if test "$(cc-version)" -lt "0400" \
326 && test "x${CONFIG_PPC64}" = "xy" ; then \ 326 && test "x${CONFIG_PPC64}" = "xy" ; then \
327 echo -n "Sorry, GCC v4.0 or above is required to build " ; \ 327 echo -n "Sorry, GCC v4.0 or above is required to build " ; \
328 echo "the 64-bit powerpc kernel." ; \ 328 echo "the 64-bit powerpc kernel." ; \
329 false ; \ 329 false ; \
330 fi 330 fi
331 @if test "$(call cc-fullversion)" = "040200" \ 331 @if test "$(cc-fullversion)" = "040200" \
332 && test "x${CONFIG_MODULES}${CONFIG_PPC64}" = "xyy" ; then \ 332 && test "x${CONFIG_MODULES}${CONFIG_PPC64}" = "xyy" ; then \
333 echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \ 333 echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \
334 echo 'kernel with modules enabled.' ; \ 334 echo 'kernel with modules enabled.' ; \
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index 51866f170684..ca7957b09a3c 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -142,6 +142,7 @@ CONFIG_VIRT_DRIVERS=y
142CONFIG_FSL_HV_MANAGER=y 142CONFIG_FSL_HV_MANAGER=y
143CONFIG_STAGING=y 143CONFIG_STAGING=y
144CONFIG_FSL_CORENET_CF=y 144CONFIG_FSL_CORENET_CF=y
145CONFIG_CLK_QORIQ=y
145CONFIG_EXT2_FS=y 146CONFIG_EXT2_FS=y
146CONFIG_EXT3_FS=y 147CONFIG_EXT3_FS=y
147# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 148# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index d6c0c8198952..04737aaa8b6b 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -122,6 +122,7 @@ CONFIG_DMADEVICES=y
122CONFIG_FSL_DMA=y 122CONFIG_FSL_DMA=y
123CONFIG_VIRT_DRIVERS=y 123CONFIG_VIRT_DRIVERS=y
124CONFIG_FSL_HV_MANAGER=y 124CONFIG_FSL_HV_MANAGER=y
125CONFIG_CLK_QORIQ=y
125CONFIG_FSL_CORENET_CF=y 126CONFIG_FSL_CORENET_CF=y
126CONFIG_EXT2_FS=y 127CONFIG_EXT2_FS=y
127CONFIG_EXT3_FS=y 128CONFIG_EXT3_FS=y
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 7316dd15278a..2d7b33fab953 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -54,6 +54,7 @@
54#include <linux/irq.h> 54#include <linux/irq.h>
55#include <linux/delay.h> 55#include <linux/delay.h>
56#include <linux/irq_work.h> 56#include <linux/irq_work.h>
57#include <linux/clk-provider.h>
57#include <asm/trace.h> 58#include <asm/trace.h>
58 59
59#include <asm/io.h> 60#include <asm/io.h>
@@ -975,6 +976,10 @@ void __init time_init(void)
975 976
976 init_decrementer_clockevent(); 977 init_decrementer_clockevent();
977 tick_setup_hrtimer_broadcast(); 978 tick_setup_hrtimer_broadcast();
979
980#ifdef CONFIG_COMMON_CLK
981 of_clk_init(NULL);
982#endif
978} 983}
979 984
980 985
diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c
index 6eb614a271fb..f691bcabd710 100644
--- a/arch/powerpc/platforms/512x/clock-commonclk.c
+++ b/arch/powerpc/platforms/512x/clock-commonclk.c
@@ -1168,6 +1168,11 @@ static void mpc5121_clk_provide_backwards_compat(void)
1168 } 1168 }
1169} 1169}
1170 1170
1171/*
1172 * The "fixed-clock" nodes (which include the oscillator node if the board's
1173 * DT provides one) have already been scanned by of_clk_init() in
1174 * time_init().
1175 */
1171int __init mpc5121_clk_init(void) 1176int __init mpc5121_clk_init(void)
1172{ 1177{
1173 struct device_node *clk_np; 1178 struct device_node *clk_np;
@@ -1187,12 +1192,6 @@ int __init mpc5121_clk_init(void)
1187 mpc512x_clk_preset_data(); 1192 mpc512x_clk_preset_data();
1188 1193
1189 /* 1194 /*
1190 * have the device tree scanned for "fixed-clock" nodes (which
1191 * includes the oscillator node if the board's DT provides one)
1192 */
1193 of_clk_init(NULL);
1194
1195 /*
1196 * add a dummy clock for those situations where a clock spec is 1195 * add a dummy clock for those situations where a clock spec is
1197 * required yet no real clock is involved 1196 * required yet no real clock is involved
1198 */ 1197 */
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 4c8008dd938e..99824ff8dd35 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -74,7 +74,7 @@ static void hypfs_remove(struct dentry *dentry)
74 parent = dentry->d_parent; 74 parent = dentry->d_parent;
75 mutex_lock(&parent->d_inode->i_mutex); 75 mutex_lock(&parent->d_inode->i_mutex);
76 if (hypfs_positive(dentry)) { 76 if (hypfs_positive(dentry)) {
77 if (S_ISDIR(dentry->d_inode->i_mode)) 77 if (d_is_dir(dentry))
78 simple_rmdir(parent->d_inode, dentry); 78 simple_rmdir(parent->d_inode, dentry);
79 else 79 else
80 simple_unlink(parent->d_inode, dentry); 80 simple_unlink(parent->d_inode, dentry);
@@ -144,36 +144,32 @@ static int hypfs_open(struct inode *inode, struct file *filp)
144 return nonseekable_open(inode, filp); 144 return nonseekable_open(inode, filp);
145} 145}
146 146
147static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov, 147static ssize_t hypfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
148 unsigned long nr_segs, loff_t offset)
149{ 148{
150 char *data; 149 struct file *file = iocb->ki_filp;
151 ssize_t ret; 150 char *data = file->private_data;
152 struct file *filp = iocb->ki_filp; 151 size_t available = strlen(data);
153 /* XXX: temporary */ 152 loff_t pos = iocb->ki_pos;
154 char __user *buf = iov[0].iov_base; 153 size_t count;
155 size_t count = iov[0].iov_len;
156
157 if (nr_segs != 1)
158 return -EINVAL;
159
160 data = filp->private_data;
161 ret = simple_read_from_buffer(buf, count, &offset, data, strlen(data));
162 if (ret <= 0)
163 return ret;
164 154
165 iocb->ki_pos += ret; 155 if (pos < 0)
166 file_accessed(filp); 156 return -EINVAL;
167 157 if (pos >= available || !iov_iter_count(to))
168 return ret; 158 return 0;
159 count = copy_to_iter(data + pos, available - pos, to);
160 if (!count)
161 return -EFAULT;
162 iocb->ki_pos = pos + count;
163 file_accessed(file);
164 return count;
169} 165}
170static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov, 166
171 unsigned long nr_segs, loff_t offset) 167static ssize_t hypfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
172{ 168{
173 int rc; 169 int rc;
174 struct super_block *sb = file_inode(iocb->ki_filp)->i_sb; 170 struct super_block *sb = file_inode(iocb->ki_filp)->i_sb;
175 struct hypfs_sb_info *fs_info = sb->s_fs_info; 171 struct hypfs_sb_info *fs_info = sb->s_fs_info;
176 size_t count = iov_length(iov, nr_segs); 172 size_t count = iov_iter_count(from);
177 173
178 /* 174 /*
179 * Currently we only allow one update per second for two reasons: 175 * Currently we only allow one update per second for two reasons:
@@ -202,6 +198,7 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
202 } 198 }
203 hypfs_update_update(sb); 199 hypfs_update_update(sb);
204 rc = count; 200 rc = count;
201 iov_iter_advance(from, count);
205out: 202out:
206 mutex_unlock(&fs_info->lock); 203 mutex_unlock(&fs_info->lock);
207 return rc; 204 return rc;
@@ -440,10 +437,10 @@ struct dentry *hypfs_create_str(struct dentry *dir,
440static const struct file_operations hypfs_file_ops = { 437static const struct file_operations hypfs_file_ops = {
441 .open = hypfs_open, 438 .open = hypfs_open,
442 .release = hypfs_release, 439 .release = hypfs_release,
443 .read = do_sync_read, 440 .read = new_sync_read,
444 .write = do_sync_write, 441 .write = new_sync_write,
445 .aio_read = hypfs_aio_read, 442 .read_iter = hypfs_read_iter,
446 .aio_write = hypfs_aio_write, 443 .write_iter = hypfs_write_iter,
447 .llseek = no_llseek, 444 .llseek = no_llseek,
448}; 445};
449 446
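
hypfs_read_iter() above swaps simple_read_from_buffer() for explicit position arithmetic over the in-memory string, then lets copy_to_iter() cap the copy at the iterator's remaining count. The same bounds logic as a plain userspace read loop (memcpy stands in for copy_to_iter, which is kernel-only; all names here are illustrative):

#include <stdio.h>
#include <string.h>

/* Read up to count bytes of data (available bytes long) at *pos. */
static size_t buffer_read(const char *data, size_t available,
			  long long *pos, char *buf, size_t count)
{
	size_t n;

	if (*pos < 0 || (size_t)*pos >= available || !count)
		return 0;	/* EOF or nothing requested */
	n = available - (size_t)*pos;
	if (n > count)
		n = count;
	memcpy(buf, data + *pos, n);
	*pos += n;
	return n;
}

int main(void)
{
	const char *data = "hello, hypfs\n";
	long long pos = 0;
	char buf[8];
	size_t n;

	while ((n = buffer_read(data, strlen(data), &pos, buf, sizeof(buf))))
		fwrite(buf, 1, n, stdout);
	return 0;
}
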
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index f664e96f48c7..1a9a98de5bde 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -16,6 +16,7 @@
16struct zpci_iomap_entry { 16struct zpci_iomap_entry {
17 u32 fh; 17 u32 fh;
18 u8 bar; 18 u8 bar;
19 u16 count;
19}; 20};
20 21
21extern struct zpci_iomap_entry *zpci_iomap_start; 22extern struct zpci_iomap_entry *zpci_iomap_start;
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index fbb5ee3ae57c..e08ec38f8c6e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -91,7 +91,9 @@ extern unsigned long zero_page_mask;
91 */ 91 */
92#define PTRS_PER_PTE 256 92#define PTRS_PER_PTE 256
93#ifndef CONFIG_64BIT 93#ifndef CONFIG_64BIT
94#define __PAGETABLE_PUD_FOLDED
94#define PTRS_PER_PMD 1 95#define PTRS_PER_PMD 1
96#define __PAGETABLE_PMD_FOLDED
95#define PTRS_PER_PUD 1 97#define PTRS_PER_PUD 1
96#else /* CONFIG_64BIT */ 98#else /* CONFIG_64BIT */
97#define PTRS_PER_PMD 2048 99#define PTRS_PER_PMD 2048
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index c4fbb9527c5c..b1453a2ae1ca 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -18,15 +18,15 @@ struct cpu_topology_s390 {
18 cpumask_t book_mask; 18 cpumask_t book_mask;
19}; 19};
20 20
21extern struct cpu_topology_s390 cpu_topology[NR_CPUS]; 21DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
22 22
23#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id) 23#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
24#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id) 24#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
25#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_mask) 25#define topology_thread_cpumask(cpu) (&per_cpu(cpu_topology, cpu).thread_mask)
26#define topology_core_id(cpu) (cpu_topology[cpu].core_id) 26#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
27#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask) 27#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
28#define topology_book_id(cpu) (cpu_topology[cpu].book_id) 28#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
29#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask) 29#define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask)
30 30
31#define mc_capable() 1 31#define mc_capable() 1
32 32
@@ -51,14 +51,6 @@ static inline void topology_expect_change(void) { }
51#define POLARIZATION_VM (2) 51#define POLARIZATION_VM (2)
52#define POLARIZATION_VH (3) 52#define POLARIZATION_VH (3)
53 53
54#ifdef CONFIG_SCHED_BOOK
55void s390_init_cpu_topology(void);
56#else
57static inline void s390_init_cpu_topology(void)
58{
59};
60#endif
61
62#include <asm-generic/topology.h> 54#include <asm-generic/topology.h>
63 55
64#endif /* _ASM_S390_TOPOLOGY_H */ 56#endif /* _ASM_S390_TOPOLOGY_H */
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index 632fa06ea162..0969d113b3d6 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -91,12 +91,9 @@ static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
91{ 91{
92 if (level >= CACHE_MAX_LEVEL) 92 if (level >= CACHE_MAX_LEVEL)
93 return CACHE_TYPE_NOCACHE; 93 return CACHE_TYPE_NOCACHE;
94
95 ci += level; 94 ci += level;
96
97 if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE) 95 if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
98 return CACHE_TYPE_NOCACHE; 96 return CACHE_TYPE_NOCACHE;
99
100 return cache_type_map[ci->type]; 97 return cache_type_map[ci->type];
101} 98}
102 99
@@ -111,23 +108,19 @@ static inline unsigned long ecag(int ai, int li, int ti)
111} 108}
112 109
113static void ci_leaf_init(struct cacheinfo *this_leaf, int private, 110static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
114 enum cache_type type, unsigned int level) 111 enum cache_type type, unsigned int level, int cpu)
115{ 112{
116 int ti, num_sets; 113 int ti, num_sets;
117 int cpu = smp_processor_id();
118 114
119 if (type == CACHE_TYPE_INST) 115 if (type == CACHE_TYPE_INST)
120 ti = CACHE_TI_INSTRUCTION; 116 ti = CACHE_TI_INSTRUCTION;
121 else 117 else
122 ti = CACHE_TI_UNIFIED; 118 ti = CACHE_TI_UNIFIED;
123
124 this_leaf->level = level + 1; 119 this_leaf->level = level + 1;
125 this_leaf->type = type; 120 this_leaf->type = type;
126 this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti); 121 this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
127 this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, 122 this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
128 level, ti);
129 this_leaf->size = ecag(EXTRACT_SIZE, level, ti); 123 this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
130
131 num_sets = this_leaf->size / this_leaf->coherency_line_size; 124 num_sets = this_leaf->size / this_leaf->coherency_line_size;
132 num_sets /= this_leaf->ways_of_associativity; 125 num_sets /= this_leaf->ways_of_associativity;
133 this_leaf->number_of_sets = num_sets; 126 this_leaf->number_of_sets = num_sets;
@@ -145,7 +138,6 @@ int init_cache_level(unsigned int cpu)
145 138
146 if (!this_cpu_ci) 139 if (!this_cpu_ci)
147 return -EINVAL; 140 return -EINVAL;
148
149 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); 141 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
150 do { 142 do {
151 ctype = get_cache_type(&ct.ci[0], level); 143 ctype = get_cache_type(&ct.ci[0], level);
@@ -154,34 +146,31 @@ int init_cache_level(unsigned int cpu)
154 /* Separate instruction and data caches */ 146 /* Separate instruction and data caches */
155 leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1; 147 leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
156 } while (++level < CACHE_MAX_LEVEL); 148 } while (++level < CACHE_MAX_LEVEL);
157
158 this_cpu_ci->num_levels = level; 149 this_cpu_ci->num_levels = level;
159 this_cpu_ci->num_leaves = leaves; 150 this_cpu_ci->num_leaves = leaves;
160
161 return 0; 151 return 0;
162} 152}
163 153
164int populate_cache_leaves(unsigned int cpu) 154int populate_cache_leaves(unsigned int cpu)
165{ 155{
156 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
157 struct cacheinfo *this_leaf = this_cpu_ci->info_list;
166 unsigned int level, idx, pvt; 158 unsigned int level, idx, pvt;
167 union cache_topology ct; 159 union cache_topology ct;
168 enum cache_type ctype; 160 enum cache_type ctype;
169 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
170 struct cacheinfo *this_leaf = this_cpu_ci->info_list;
171 161
172 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); 162 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
173 for (idx = 0, level = 0; level < this_cpu_ci->num_levels && 163 for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
174 idx < this_cpu_ci->num_leaves; idx++, level++) { 164 idx < this_cpu_ci->num_leaves; idx++, level++) {
175 if (!this_leaf) 165 if (!this_leaf)
176 return -EINVAL; 166 return -EINVAL;
177
178 pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0; 167 pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
179 ctype = get_cache_type(&ct.ci[0], level); 168 ctype = get_cache_type(&ct.ci[0], level);
180 if (ctype == CACHE_TYPE_SEPARATE) { 169 if (ctype == CACHE_TYPE_SEPARATE) {
181 ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level); 170 ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu);
182 ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level); 171 ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu);
183 } else { 172 } else {
184 ci_leaf_init(this_leaf++, pvt, ctype, level); 173 ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
185 } 174 }
186 } 175 }
187 return 0; 176 return 0;
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 70a329450901..4427ab7ac23a 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -393,17 +393,19 @@ static __init void detect_machine_facilities(void)
393 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; 393 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
394 if (test_facility(129)) 394 if (test_facility(129))
395 S390_lowcore.machine_flags |= MACHINE_FLAG_VX; 395 S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
396 if (test_facility(128))
397 S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
398#endif 396#endif
399} 397}
400 398
401static int __init nocad_setup(char *str) 399static int __init cad_setup(char *str)
402{ 400{
403 S390_lowcore.machine_flags &= ~MACHINE_FLAG_CAD; 401 int val;
402
403 get_option(&str, &val);
404 if (val && test_facility(128))
405 S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
404 return 0; 406 return 0;
405} 407}
406early_param("nocad", nocad_setup); 408early_param("cad", cad_setup);
407 409
408static int __init cad_init(void) 410static int __init cad_init(void)
409{ 411{
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index bfac77ada4f2..a5ea8bc17cb3 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -909,7 +909,6 @@ void __init setup_arch(char **cmdline_p)
909 setup_lowcore(); 909 setup_lowcore();
910 smp_fill_possible_mask(); 910 smp_fill_possible_mask();
911 cpu_init(); 911 cpu_init();
912 s390_init_cpu_topology();
913 912
914 /* 913 /*
915 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). 914 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index a668993ff577..db8f1115a3bf 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -59,14 +59,13 @@ enum {
59 CPU_STATE_CONFIGURED, 59 CPU_STATE_CONFIGURED,
60}; 60};
61 61
62static DEFINE_PER_CPU(struct cpu *, cpu_device);
63
62struct pcpu { 64struct pcpu {
63 struct cpu *cpu;
64 struct _lowcore *lowcore; /* lowcore page(s) for the cpu */ 65 struct _lowcore *lowcore; /* lowcore page(s) for the cpu */
65 unsigned long async_stack; /* async stack for the cpu */
66 unsigned long panic_stack; /* panic stack for the cpu */
67 unsigned long ec_mask; /* bit mask for ec_xxx functions */ 66 unsigned long ec_mask; /* bit mask for ec_xxx functions */
68 int state; /* physical cpu state */ 67 signed char state; /* physical cpu state */
69 int polarization; /* physical polarization */ 68 signed char polarization; /* physical polarization */
70 u16 address; /* physical cpu address */ 69 u16 address; /* physical cpu address */
71}; 70};
72 71
@@ -173,25 +172,30 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
173 pcpu_sigp_retry(pcpu, order, 0); 172 pcpu_sigp_retry(pcpu, order, 0);
174} 173}
175 174
175#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
176#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
177
176static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) 178static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
177{ 179{
180 unsigned long async_stack, panic_stack;
178 struct _lowcore *lc; 181 struct _lowcore *lc;
179 182
180 if (pcpu != &pcpu_devices[0]) { 183 if (pcpu != &pcpu_devices[0]) {
181 pcpu->lowcore = (struct _lowcore *) 184 pcpu->lowcore = (struct _lowcore *)
182 __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); 185 __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
183 pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); 186 async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
184 pcpu->panic_stack = __get_free_page(GFP_KERNEL); 187 panic_stack = __get_free_page(GFP_KERNEL);
185 if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack) 188 if (!pcpu->lowcore || !panic_stack || !async_stack)
186 goto out; 189 goto out;
190 } else {
191 async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
192 panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
187 } 193 }
188 lc = pcpu->lowcore; 194 lc = pcpu->lowcore;
189 memcpy(lc, &S390_lowcore, 512); 195 memcpy(lc, &S390_lowcore, 512);
190 memset((char *) lc + 512, 0, sizeof(*lc) - 512); 196 memset((char *) lc + 512, 0, sizeof(*lc) - 512);
191 lc->async_stack = pcpu->async_stack + ASYNC_SIZE 197 lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
192 - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); 198 lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
193 lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
194 - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
195 lc->cpu_nr = cpu; 199 lc->cpu_nr = cpu;
196 lc->spinlock_lockval = arch_spin_lockval(cpu); 200 lc->spinlock_lockval = arch_spin_lockval(cpu);
197#ifndef CONFIG_64BIT 201#ifndef CONFIG_64BIT
@@ -212,8 +216,8 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
212 return 0; 216 return 0;
213out: 217out:
214 if (pcpu != &pcpu_devices[0]) { 218 if (pcpu != &pcpu_devices[0]) {
215 free_page(pcpu->panic_stack); 219 free_page(panic_stack);
216 free_pages(pcpu->async_stack, ASYNC_ORDER); 220 free_pages(async_stack, ASYNC_ORDER);
217 free_pages((unsigned long) pcpu->lowcore, LC_ORDER); 221 free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
218 } 222 }
219 return -ENOMEM; 223 return -ENOMEM;
@@ -235,11 +239,11 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
235#else 239#else
236 vdso_free_per_cpu(pcpu->lowcore); 240 vdso_free_per_cpu(pcpu->lowcore);
237#endif 241#endif
238 if (pcpu != &pcpu_devices[0]) { 242 if (pcpu == &pcpu_devices[0])
239 free_page(pcpu->panic_stack); 243 return;
 240 free_pages(pcpu->async_stack, ASYNC_ORDER); 244 free_page(pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET);
 241 free_pages((unsigned long) pcpu->lowcore, LC_ORDER); 245 free_pages(pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET, ASYNC_ORDER);
242 } 246 free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
243} 247}
244 248
245#endif /* CONFIG_HOTPLUG_CPU */ 249#endif /* CONFIG_HOTPLUG_CPU */
@@ -366,7 +370,8 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
366void smp_call_ipl_cpu(void (*func)(void *), void *data) 370void smp_call_ipl_cpu(void (*func)(void *), void *data)
367{ 371{
368 pcpu_delegate(&pcpu_devices[0], func, data, 372 pcpu_delegate(&pcpu_devices[0], func, data,
369 pcpu_devices->panic_stack + PAGE_SIZE); 373 pcpu_devices->lowcore->panic_stack -
374 PANIC_FRAME_OFFSET + PAGE_SIZE);
370} 375}
371 376
372int smp_find_processor_id(u16 address) 377int smp_find_processor_id(u16 address)
@@ -935,10 +940,6 @@ void __init smp_prepare_boot_cpu(void)
935 pcpu->state = CPU_STATE_CONFIGURED; 940 pcpu->state = CPU_STATE_CONFIGURED;
936 pcpu->address = stap(); 941 pcpu->address = stap();
937 pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix(); 942 pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
938 pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
939 + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
940 pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE
941 + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
942 S390_lowcore.percpu_offset = __per_cpu_offset[0]; 943 S390_lowcore.percpu_offset = __per_cpu_offset[0];
943 smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); 944 smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
944 set_cpu_present(0, true); 945 set_cpu_present(0, true);
@@ -1078,8 +1079,7 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
1078 void *hcpu) 1079 void *hcpu)
1079{ 1080{
1080 unsigned int cpu = (unsigned int)(long)hcpu; 1081 unsigned int cpu = (unsigned int)(long)hcpu;
1081 struct cpu *c = pcpu_devices[cpu].cpu; 1082 struct device *s = &per_cpu(cpu_device, cpu)->dev;
1082 struct device *s = &c->dev;
1083 int err = 0; 1083 int err = 0;
1084 1084
1085 switch (action & ~CPU_TASKS_FROZEN) { 1085 switch (action & ~CPU_TASKS_FROZEN) {
@@ -1102,7 +1102,7 @@ static int smp_add_present_cpu(int cpu)
1102 c = kzalloc(sizeof(*c), GFP_KERNEL); 1102 c = kzalloc(sizeof(*c), GFP_KERNEL);
1103 if (!c) 1103 if (!c)
1104 return -ENOMEM; 1104 return -ENOMEM;
1105 pcpu_devices[cpu].cpu = c; 1105 per_cpu(cpu_device, cpu) = c;
1106 s = &c->dev; 1106 s = &c->dev;
1107 c->hotpluggable = 1; 1107 c->hotpluggable = 1;
1108 rc = register_cpu(c, cpu); 1108 rc = register_cpu(c, cpu);
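The pcpu shrink above works because ASYNC_FRAME_OFFSET and PANIC_FRAME_OFFSET make the stored lowcore pointers reversible. A minimal C sketch of that arithmetic, with stand-in values for the kernel's constants:

    /* Stand-in values; the kernel derives the real ones from its headers. */
    #define PAGE_SIZE            4096UL
    #define ASYNC_SIZE           (4 * PAGE_SIZE)
    #define STACK_FRAME_OVERHEAD 160UL
    #define __PT_SIZE            336UL

    #define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
    #define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)

    /* The lowcore holds the topmost usable frame; the allocation base is
     * always FRAME_OFFSET bytes below it, so no per-cpu copy is needed. */
    static unsigned long async_stack_base(unsigned long lc_async_stack)
    {
            return lc_async_stack - ASYNC_FRAME_OFFSET;
    }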
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 24ee33f1af24..14da43b801d9 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -7,14 +7,14 @@
7#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 7#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
8 8
9#include <linux/workqueue.h> 9#include <linux/workqueue.h>
10#include <linux/bootmem.h>
11#include <linux/cpuset.h> 10#include <linux/cpuset.h>
12#include <linux/device.h> 11#include <linux/device.h>
13#include <linux/export.h> 12#include <linux/export.h>
14#include <linux/kernel.h> 13#include <linux/kernel.h>
15#include <linux/sched.h> 14#include <linux/sched.h>
16#include <linux/init.h>
17#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/init.h>
17#include <linux/slab.h>
18#include <linux/cpu.h> 18#include <linux/cpu.h>
19#include <linux/smp.h> 19#include <linux/smp.h>
20#include <linux/mm.h> 20#include <linux/mm.h>
@@ -42,8 +42,8 @@ static DEFINE_SPINLOCK(topology_lock);
42static struct mask_info socket_info; 42static struct mask_info socket_info;
43static struct mask_info book_info; 43static struct mask_info book_info;
44 44
45struct cpu_topology_s390 cpu_topology[NR_CPUS]; 45DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
46EXPORT_SYMBOL_GPL(cpu_topology); 46EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
47 47
48static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) 48static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
49{ 49{
@@ -90,15 +90,15 @@ static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
90 if (lcpu < 0) 90 if (lcpu < 0)
91 continue; 91 continue;
92 for (i = 0; i <= smp_cpu_mtid; i++) { 92 for (i = 0; i <= smp_cpu_mtid; i++) {
93 cpu_topology[lcpu + i].book_id = book->id; 93 per_cpu(cpu_topology, lcpu + i).book_id = book->id;
94 cpu_topology[lcpu + i].core_id = rcore; 94 per_cpu(cpu_topology, lcpu + i).core_id = rcore;
95 cpu_topology[lcpu + i].thread_id = lcpu + i; 95 per_cpu(cpu_topology, lcpu + i).thread_id = lcpu + i;
96 cpumask_set_cpu(lcpu + i, &book->mask); 96 cpumask_set_cpu(lcpu + i, &book->mask);
97 cpumask_set_cpu(lcpu + i, &socket->mask); 97 cpumask_set_cpu(lcpu + i, &socket->mask);
98 if (one_socket_per_cpu) 98 if (one_socket_per_cpu)
99 cpu_topology[lcpu + i].socket_id = rcore; 99 per_cpu(cpu_topology, lcpu + i).socket_id = rcore;
100 else 100 else
101 cpu_topology[lcpu + i].socket_id = socket->id; 101 per_cpu(cpu_topology, lcpu + i).socket_id = socket->id;
102 smp_cpu_set_polarization(lcpu + i, tl_core->pp); 102 smp_cpu_set_polarization(lcpu + i, tl_core->pp);
103 } 103 }
104 if (one_socket_per_cpu) 104 if (one_socket_per_cpu)
@@ -249,14 +249,14 @@ static void update_cpu_masks(void)
249 249
250 spin_lock_irqsave(&topology_lock, flags); 250 spin_lock_irqsave(&topology_lock, flags);
251 for_each_possible_cpu(cpu) { 251 for_each_possible_cpu(cpu) {
252 cpu_topology[cpu].thread_mask = cpu_thread_map(cpu); 252 per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu);
253 cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu); 253 per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu);
254 cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu); 254 per_cpu(cpu_topology, cpu).book_mask = cpu_group_map(&book_info, cpu);
255 if (!MACHINE_HAS_TOPOLOGY) { 255 if (!MACHINE_HAS_TOPOLOGY) {
256 cpu_topology[cpu].thread_id = cpu; 256 per_cpu(cpu_topology, cpu).thread_id = cpu;
257 cpu_topology[cpu].core_id = cpu; 257 per_cpu(cpu_topology, cpu).core_id = cpu;
258 cpu_topology[cpu].socket_id = cpu; 258 per_cpu(cpu_topology, cpu).socket_id = cpu;
259 cpu_topology[cpu].book_id = cpu; 259 per_cpu(cpu_topology, cpu).book_id = cpu;
260 } 260 }
261 } 261 }
262 spin_unlock_irqrestore(&topology_lock, flags); 262 spin_unlock_irqrestore(&topology_lock, flags);
@@ -334,50 +334,6 @@ void topology_expect_change(void)
334 set_topology_timer(); 334 set_topology_timer();
335} 335}
336 336
337static int __init early_parse_topology(char *p)
338{
339 if (strncmp(p, "off", 3))
340 return 0;
341 topology_enabled = 0;
342 return 0;
343}
344early_param("topology", early_parse_topology);
345
346static void __init alloc_masks(struct sysinfo_15_1_x *info,
347 struct mask_info *mask, int offset)
348{
349 int i, nr_masks;
350
351 nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
352 for (i = 0; i < info->mnest - offset; i++)
353 nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
354 nr_masks = max(nr_masks, 1);
355 for (i = 0; i < nr_masks; i++) {
356 mask->next = alloc_bootmem_align(
357 roundup_pow_of_two(sizeof(struct mask_info)),
358 roundup_pow_of_two(sizeof(struct mask_info)));
359 mask = mask->next;
360 }
361}
362
363void __init s390_init_cpu_topology(void)
364{
365 struct sysinfo_15_1_x *info;
366 int i;
367
368 if (!MACHINE_HAS_TOPOLOGY)
369 return;
370 tl_info = alloc_bootmem_pages(PAGE_SIZE);
371 info = tl_info;
372 store_topology(info);
373 pr_info("The CPU configuration topology of the machine is:");
374 for (i = 0; i < TOPOLOGY_NR_MAG; i++)
375 printk(KERN_CONT " %d", info->mag[i]);
376 printk(KERN_CONT " / %d\n", info->mnest);
377 alloc_masks(info, &socket_info, 1);
378 alloc_masks(info, &book_info, 2);
379}
380
381static int cpu_management; 337static int cpu_management;
382 338
383static ssize_t dispatching_show(struct device *dev, 339static ssize_t dispatching_show(struct device *dev,
@@ -467,20 +423,29 @@ int topology_cpu_init(struct cpu *cpu)
467 423
468const struct cpumask *cpu_thread_mask(int cpu) 424const struct cpumask *cpu_thread_mask(int cpu)
469{ 425{
470 return &cpu_topology[cpu].thread_mask; 426 return &per_cpu(cpu_topology, cpu).thread_mask;
471} 427}
472 428
473 429
474const struct cpumask *cpu_coregroup_mask(int cpu) 430const struct cpumask *cpu_coregroup_mask(int cpu)
475{ 431{
476 return &cpu_topology[cpu].core_mask; 432 return &per_cpu(cpu_topology, cpu).core_mask;
477} 433}
478 434
479static const struct cpumask *cpu_book_mask(int cpu) 435static const struct cpumask *cpu_book_mask(int cpu)
480{ 436{
481 return &cpu_topology[cpu].book_mask; 437 return &per_cpu(cpu_topology, cpu).book_mask;
482} 438}
483 439
440static int __init early_parse_topology(char *p)
441{
442 if (strncmp(p, "off", 3))
443 return 0;
444 topology_enabled = 0;
445 return 0;
446}
447early_param("topology", early_parse_topology);
448
484static struct sched_domain_topology_level s390_topology[] = { 449static struct sched_domain_topology_level s390_topology[] = {
485 { cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, 450 { cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
486 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, 451 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
@@ -489,6 +454,42 @@ static struct sched_domain_topology_level s390_topology[] = {
489 { NULL, }, 454 { NULL, },
490}; 455};
491 456
457static void __init alloc_masks(struct sysinfo_15_1_x *info,
458 struct mask_info *mask, int offset)
459{
460 int i, nr_masks;
461
462 nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
463 for (i = 0; i < info->mnest - offset; i++)
464 nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
465 nr_masks = max(nr_masks, 1);
466 for (i = 0; i < nr_masks; i++) {
467 mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
468 mask = mask->next;
469 }
470}
471
472static int __init s390_topology_init(void)
473{
474 struct sysinfo_15_1_x *info;
475 int i;
476
477 if (!MACHINE_HAS_TOPOLOGY)
478 return 0;
479 tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
480 info = tl_info;
481 store_topology(info);
482 pr_info("The CPU configuration topology of the machine is:");
483 for (i = 0; i < TOPOLOGY_NR_MAG; i++)
484 printk(KERN_CONT " %d", info->mag[i]);
485 printk(KERN_CONT " / %d\n", info->mnest);
486 alloc_masks(info, &socket_info, 1);
487 alloc_masks(info, &book_info, 2);
488 set_sched_topology(s390_topology);
489 return 0;
490}
491early_initcall(s390_topology_init);
492
492static int __init topology_init(void) 493static int __init topology_init(void)
493{ 494{
494 if (MACHINE_HAS_TOPOLOGY) 495 if (MACHINE_HAS_TOPOLOGY)
@@ -498,10 +499,3 @@ static int __init topology_init(void)
498 return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching); 499 return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
499} 500}
500device_initcall(topology_init); 501device_initcall(topology_init);
501
502static int __init early_topology_init(void)
503{
504 set_sched_topology(s390_topology);
505 return 0;
506}
507early_initcall(early_topology_init);
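Mechanically, the topology.c rework is the standard array-to-percpu conversion. A hedged sketch of the pattern with a toy struct (names are illustrative, not the kernel's):

    #include <linux/percpu.h>

    struct toy_topology {                /* stand-in for cpu_topology_s390 */
            int socket_id;
            int core_id;
    };

    static DEFINE_PER_CPU(struct toy_topology, toy_topology);

    static void set_ids(int cpu)
    {
            /* per_cpu(var, cpu) replaces the old cpu_topology[cpu] indexing
             * and gives each CPU's copy proper percpu placement. */
            per_cpu(toy_topology, cpu).socket_id = cpu;
            per_cpu(toy_topology, cpu).core_id = cpu;
    }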
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 7699e735ae28..61541fb93dc6 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -25,9 +25,7 @@ __kernel_clock_gettime:
25 je 4f 25 je 4f
26 cghi %r2,__CLOCK_REALTIME 26 cghi %r2,__CLOCK_REALTIME
27 je 5f 27 je 5f
28 cghi %r2,__CLOCK_THREAD_CPUTIME_ID 28 cghi %r2,-3 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
29 je 9f
30 cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
31 je 9f 29 je 9f
32 cghi %r2,__CLOCK_MONOTONIC_COARSE 30 cghi %r2,__CLOCK_MONOTONIC_COARSE
33 je 3f 31 je 3f
@@ -106,7 +104,7 @@ __kernel_clock_gettime:
106 aghi %r15,16 104 aghi %r15,16
107 br %r14 105 br %r14
108 106
109 /* CLOCK_THREAD_CPUTIME_ID for this thread */ 107 /* CPUCLOCK_VIRT for this thread */
1109: icm %r0,15,__VDSO_ECTG_OK(%r5) 1089: icm %r0,15,__VDSO_ECTG_OK(%r5)
111 jz 12f 109 jz 12f
112 ear %r2,%a4 110 ear %r2,%a4
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index d008f638b2cd..179a2c20b01f 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -183,7 +183,10 @@ unsigned long randomize_et_dyn(void)
183{ 183{
184 unsigned long base; 184 unsigned long base;
185 185
186 base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT); 186 base = STACK_TOP / 3 * 2;
187 if (!is_32bit_task())
188 /* Align to 4GB */
189 base &= ~((1UL << 32) - 1);
187 return base + mmap_rnd(); 190 return base + mmap_rnd();
188} 191}
189 192
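The replacement alignment code simply clears the low 32 bits of base for 64-bit tasks. A quick standalone check of that arithmetic (the input value is illustrative):

    #include <stdio.h>

    int main(void)
    {
            unsigned long base = 0x2aa8500000UL;  /* illustrative input */

            base &= ~((1UL << 32) - 1);           /* drop bits 31:0 */
            printf("%#lx\n", base);               /* prints 0x2a00000000 */
            return 0;
    }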
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 3290f11ae1d9..753a56731951 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -259,7 +259,10 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
259} 259}
260 260
261/* Create a virtual mapping cookie for a PCI BAR */ 261/* Create a virtual mapping cookie for a PCI BAR */
262void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max) 262void __iomem *pci_iomap_range(struct pci_dev *pdev,
263 int bar,
264 unsigned long offset,
265 unsigned long max)
263{ 266{
264 struct zpci_dev *zdev = get_zdev(pdev); 267 struct zpci_dev *zdev = get_zdev(pdev);
265 u64 addr; 268 u64 addr;
@@ -270,14 +273,27 @@ void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
270 273
271 idx = zdev->bars[bar].map_idx; 274 idx = zdev->bars[bar].map_idx;
272 spin_lock(&zpci_iomap_lock); 275 spin_lock(&zpci_iomap_lock);
273 zpci_iomap_start[idx].fh = zdev->fh; 276 if (zpci_iomap_start[idx].count++) {
274 zpci_iomap_start[idx].bar = bar; 277 BUG_ON(zpci_iomap_start[idx].fh != zdev->fh ||
278 zpci_iomap_start[idx].bar != bar);
279 } else {
280 zpci_iomap_start[idx].fh = zdev->fh;
281 zpci_iomap_start[idx].bar = bar;
282 }
283 /* Detect overrun */
284 BUG_ON(!zpci_iomap_start[idx].count);
275 spin_unlock(&zpci_iomap_lock); 285 spin_unlock(&zpci_iomap_lock);
276 286
277 addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48); 287 addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
278 return (void __iomem *) addr; 288 return (void __iomem *) addr + offset;
279} 289}
280EXPORT_SYMBOL_GPL(pci_iomap); 290EXPORT_SYMBOL_GPL(pci_iomap_range);
291
292void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
293{
294 return pci_iomap_range(dev, bar, 0, maxlen);
295}
296EXPORT_SYMBOL(pci_iomap);
281 297
282void pci_iounmap(struct pci_dev *pdev, void __iomem *addr) 298void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
283{ 299{
@@ -285,8 +301,12 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
285 301
286 idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48; 302 idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
287 spin_lock(&zpci_iomap_lock); 303 spin_lock(&zpci_iomap_lock);
288 zpci_iomap_start[idx].fh = 0; 304 /* Detect underrun */
289 zpci_iomap_start[idx].bar = 0; 305 BUG_ON(!zpci_iomap_start[idx].count);
306 if (!--zpci_iomap_start[idx].count) {
307 zpci_iomap_start[idx].fh = 0;
308 zpci_iomap_start[idx].bar = 0;
309 }
290 spin_unlock(&zpci_iomap_lock); 310 spin_unlock(&zpci_iomap_lock);
291} 311}
292EXPORT_SYMBOL_GPL(pci_iounmap); 312EXPORT_SYMBOL_GPL(pci_iounmap);
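The pci_iomap_range()/pci_iounmap() pair above turns each zpci_iomap_start slot into a reference-counted mapping. A sketch of the scheme with a simplified entry type (the struct here is an assumption; only the field names mirror the hunk):

    struct iomap_entry {            /* simplified stand-in */
            unsigned int fh;        /* PCI function handle */
            int bar;
            unsigned int count;     /* live mappings of this slot */
    };

    static int slot_get(struct iomap_entry *e, unsigned int fh, int bar)
    {
            if (e->count++) {           /* already mapped: must match */
                    if (e->fh != fh || e->bar != bar)
                            return -1;  /* the kernel BUG()s instead */
            } else {                    /* first user claims the slot */
                    e->fh = fh;
                    e->bar = bar;
            }
            return 0;
    }

    static void slot_put(struct iomap_entry *e)
    {
            if (!--e->count) {          /* last user releases the slot */
                    e->fh = 0;
                    e->bar = 0;
            }
    }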
diff --git a/arch/sh/include/asm/segment.h b/arch/sh/include/asm/segment.h
index 5e2725f4ac49..ff795d3a6909 100644
--- a/arch/sh/include/asm/segment.h
+++ b/arch/sh/include/asm/segment.h
@@ -23,7 +23,7 @@ typedef struct {
23#define USER_DS KERNEL_DS 23#define USER_DS KERNEL_DS
24#endif 24#endif
25 25
26#define segment_eq(a,b) ((a).seg == (b).seg) 26#define segment_eq(a, b) ((a).seg == (b).seg)
27 27
28#define get_ds() (KERNEL_DS) 28#define get_ds() (KERNEL_DS)
29 29
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h
index 9486376605f4..a49635c51266 100644
--- a/arch/sh/include/asm/uaccess.h
+++ b/arch/sh/include/asm/uaccess.h
@@ -60,7 +60,7 @@ struct __large_struct { unsigned long buf[100]; };
60 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ 60 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
61 __chk_user_ptr(ptr); \ 61 __chk_user_ptr(ptr); \
62 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ 62 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
63 (x) = (__typeof__(*(ptr)))__gu_val; \ 63 (x) = (__force __typeof__(*(ptr)))__gu_val; \
64 __gu_err; \ 64 __gu_err; \
65}) 65})
66 66
@@ -71,7 +71,7 @@ struct __large_struct { unsigned long buf[100]; };
71 const __typeof__(*(ptr)) *__gu_addr = (ptr); \ 71 const __typeof__(*(ptr)) *__gu_addr = (ptr); \
72 if (likely(access_ok(VERIFY_READ, __gu_addr, (size)))) \ 72 if (likely(access_ok(VERIFY_READ, __gu_addr, (size)))) \
73 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ 73 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
74 (x) = (__typeof__(*(ptr)))__gu_val; \ 74 (x) = (__force __typeof__(*(ptr)))__gu_val; \
75 __gu_err; \ 75 __gu_err; \
76}) 76})
77 77
diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h
index 2e07e0f40c6a..c01376c76b86 100644
--- a/arch/sh/include/asm/uaccess_64.h
+++ b/arch/sh/include/asm/uaccess_64.h
@@ -59,19 +59,19 @@ do { \
59 switch (size) { \ 59 switch (size) { \
60 case 1: \ 60 case 1: \
61 retval = __put_user_asm_b((void *)&x, \ 61 retval = __put_user_asm_b((void *)&x, \
62 (long)ptr); \ 62 (__force long)ptr); \
63 break; \ 63 break; \
64 case 2: \ 64 case 2: \
65 retval = __put_user_asm_w((void *)&x, \ 65 retval = __put_user_asm_w((void *)&x, \
66 (long)ptr); \ 66 (__force long)ptr); \
67 break; \ 67 break; \
68 case 4: \ 68 case 4: \
69 retval = __put_user_asm_l((void *)&x, \ 69 retval = __put_user_asm_l((void *)&x, \
70 (long)ptr); \ 70 (__force long)ptr); \
71 break; \ 71 break; \
72 case 8: \ 72 case 8: \
73 retval = __put_user_asm_q((void *)&x, \ 73 retval = __put_user_asm_q((void *)&x, \
74 (long)ptr); \ 74 (__force long)ptr); \
75 break; \ 75 break; \
76 default: \ 76 default: \
77 __put_user_unknown(); \ 77 __put_user_unknown(); \
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 9634d086fc56..64ee103dc29d 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -37,7 +37,7 @@
37#define get_fs() (current->thread.current_ds) 37#define get_fs() (current->thread.current_ds)
38#define set_fs(val) ((current->thread.current_ds) = (val)) 38#define set_fs(val) ((current->thread.current_ds) = (val))
39 39
40#define segment_eq(a,b) ((a).seg == (b).seg) 40#define segment_eq(a, b) ((a).seg == (b).seg)
41 41
42/* We have there a nice not-mapped page at PAGE_OFFSET - PAGE_SIZE, so that this test 42/* We have there a nice not-mapped page at PAGE_OFFSET - PAGE_SIZE, so that this test
43 * can be fairly lightweight. 43 * can be fairly lightweight.
@@ -46,8 +46,8 @@
46 */ 46 */
47#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; }) 47#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
48#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) 48#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
49#define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size))) 49#define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
50#define access_ok(type, addr, size) \ 50#define access_ok(type, addr, size) \
51 ({ (void)(type); __access_ok((unsigned long)(addr), size); }) 51 ({ (void)(type); __access_ok((unsigned long)(addr), size); })
52 52
53/* 53/*
@@ -91,158 +91,221 @@ void __ret_efault(void);
91 * of a performance impact. Thus we have a few rather ugly macros here, 91 * of a performance impact. Thus we have a few rather ugly macros here,
92 * and hide all the ugliness from the user. 92 * and hide all the ugliness from the user.
93 */ 93 */
94#define put_user(x,ptr) ({ \ 94#define put_user(x, ptr) ({ \
95unsigned long __pu_addr = (unsigned long)(ptr); \ 95 unsigned long __pu_addr = (unsigned long)(ptr); \
96__chk_user_ptr(ptr); \ 96 __chk_user_ptr(ptr); \
97__put_user_check((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); }) 97 __put_user_check((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \
98 98})
99#define get_user(x,ptr) ({ \ 99
100unsigned long __gu_addr = (unsigned long)(ptr); \ 100#define get_user(x, ptr) ({ \
101__chk_user_ptr(ptr); \ 101 unsigned long __gu_addr = (unsigned long)(ptr); \
102__get_user_check((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); }) 102 __chk_user_ptr(ptr); \
103 __get_user_check((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \
104})
103 105
104/* 106/*
105 * The "__xxx" versions do not do address space checking, useful when 107 * The "__xxx" versions do not do address space checking, useful when
106 * doing multiple accesses to the same area (the user has to do the 108 * doing multiple accesses to the same area (the user has to do the
107 * checks by hand with "access_ok()") 109 * checks by hand with "access_ok()")
108 */ 110 */
109#define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) 111#define __put_user(x, ptr) \
110#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)),__typeof__(*(ptr))) 112 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
113#define __get_user(x, ptr) \
114 __get_user_nocheck((x), (ptr), sizeof(*(ptr)), __typeof__(*(ptr)))
111 115
112struct __large_struct { unsigned long buf[100]; }; 116struct __large_struct { unsigned long buf[100]; };
113#define __m(x) ((struct __large_struct __user *)(x)) 117#define __m(x) ((struct __large_struct __user *)(x))
114 118
115#define __put_user_check(x,addr,size) ({ \ 119#define __put_user_check(x, addr, size) ({ \
116register int __pu_ret; \ 120 register int __pu_ret; \
117if (__access_ok(addr,size)) { \ 121 if (__access_ok(addr, size)) { \
118switch (size) { \ 122 switch (size) { \
119case 1: __put_user_asm(x,b,addr,__pu_ret); break; \ 123 case 1: \
120case 2: __put_user_asm(x,h,addr,__pu_ret); break; \ 124 __put_user_asm(x, b, addr, __pu_ret); \
121case 4: __put_user_asm(x,,addr,__pu_ret); break; \ 125 break; \
122case 8: __put_user_asm(x,d,addr,__pu_ret); break; \ 126 case 2: \
123default: __pu_ret = __put_user_bad(); break; \ 127 __put_user_asm(x, h, addr, __pu_ret); \
124} } else { __pu_ret = -EFAULT; } __pu_ret; }) 128 break; \
125 129 case 4: \
126#define __put_user_nocheck(x,addr,size) ({ \ 130 __put_user_asm(x, , addr, __pu_ret); \
127register int __pu_ret; \ 131 break; \
128switch (size) { \ 132 case 8: \
129case 1: __put_user_asm(x,b,addr,__pu_ret); break; \ 133 __put_user_asm(x, d, addr, __pu_ret); \
130case 2: __put_user_asm(x,h,addr,__pu_ret); break; \ 134 break; \
131case 4: __put_user_asm(x,,addr,__pu_ret); break; \ 135 default: \
132case 8: __put_user_asm(x,d,addr,__pu_ret); break; \ 136 __pu_ret = __put_user_bad(); \
133default: __pu_ret = __put_user_bad(); break; \ 137 break; \
134} __pu_ret; }) 138 } \
135 139 } else { \
136#define __put_user_asm(x,size,addr,ret) \ 140 __pu_ret = -EFAULT; \
141 } \
142 __pu_ret; \
143})
144
145#define __put_user_nocheck(x, addr, size) ({ \
146 register int __pu_ret; \
147 switch (size) { \
148 case 1: __put_user_asm(x, b, addr, __pu_ret); break; \
149 case 2: __put_user_asm(x, h, addr, __pu_ret); break; \
150 case 4: __put_user_asm(x, , addr, __pu_ret); break; \
151 case 8: __put_user_asm(x, d, addr, __pu_ret); break; \
152 default: __pu_ret = __put_user_bad(); break; \
153 } \
154 __pu_ret; \
155})
156
157#define __put_user_asm(x, size, addr, ret) \
137__asm__ __volatile__( \ 158__asm__ __volatile__( \
138 "/* Put user asm, inline. */\n" \ 159 "/* Put user asm, inline. */\n" \
139"1:\t" "st"#size " %1, %2\n\t" \ 160 "1:\t" "st"#size " %1, %2\n\t" \
140 "clr %0\n" \ 161 "clr %0\n" \
141"2:\n\n\t" \ 162 "2:\n\n\t" \
142 ".section .fixup,#alloc,#execinstr\n\t" \ 163 ".section .fixup,#alloc,#execinstr\n\t" \
143 ".align 4\n" \ 164 ".align 4\n" \
144"3:\n\t" \ 165 "3:\n\t" \
145 "b 2b\n\t" \ 166 "b 2b\n\t" \
146 " mov %3, %0\n\t" \ 167 " mov %3, %0\n\t" \
147 ".previous\n\n\t" \ 168 ".previous\n\n\t" \
148 ".section __ex_table,#alloc\n\t" \ 169 ".section __ex_table,#alloc\n\t" \
149 ".align 4\n\t" \ 170 ".align 4\n\t" \
150 ".word 1b, 3b\n\t" \ 171 ".word 1b, 3b\n\t" \
151 ".previous\n\n\t" \ 172 ".previous\n\n\t" \
152 : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \ 173 : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \
153 "i" (-EFAULT)) 174 "i" (-EFAULT))
154 175
155int __put_user_bad(void); 176int __put_user_bad(void);
156 177
157#define __get_user_check(x,addr,size,type) ({ \ 178#define __get_user_check(x, addr, size, type) ({ \
158register int __gu_ret; \ 179 register int __gu_ret; \
159register unsigned long __gu_val; \ 180 register unsigned long __gu_val; \
160if (__access_ok(addr,size)) { \ 181 if (__access_ok(addr, size)) { \
161switch (size) { \ 182 switch (size) { \
162case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \ 183 case 1: \
163case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \ 184 __get_user_asm(__gu_val, ub, addr, __gu_ret); \
164case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \ 185 break; \
165case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \ 186 case 2: \
166default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \ 187 __get_user_asm(__gu_val, uh, addr, __gu_ret); \
167} } else { __gu_val = 0; __gu_ret = -EFAULT; } x = (type) __gu_val; __gu_ret; }) 188 break; \
168 189 case 4: \
169#define __get_user_check_ret(x,addr,size,type,retval) ({ \ 190 __get_user_asm(__gu_val, , addr, __gu_ret); \
170register unsigned long __gu_val __asm__ ("l1"); \ 191 break; \
171if (__access_ok(addr,size)) { \ 192 case 8: \
172switch (size) { \ 193 __get_user_asm(__gu_val, d, addr, __gu_ret); \
173case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \ 194 break; \
174case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \ 195 default: \
175case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \ 196 __gu_val = 0; \
176case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \ 197 __gu_ret = __get_user_bad(); \
177default: if (__get_user_bad()) return retval; \ 198 break; \
178} x = (type) __gu_val; } else return retval; }) 199 } \
179 200 } else { \
180#define __get_user_nocheck(x,addr,size,type) ({ \ 201 __gu_val = 0; \
181register int __gu_ret; \ 202 __gu_ret = -EFAULT; \
182register unsigned long __gu_val; \ 203 } \
183switch (size) { \ 204 x = (__force type) __gu_val; \
184case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \ 205 __gu_ret; \
185case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \ 206})
186case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \ 207
187case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \ 208#define __get_user_check_ret(x, addr, size, type, retval) ({ \
188default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \ 209 register unsigned long __gu_val __asm__ ("l1"); \
189} x = (type) __gu_val; __gu_ret; }) 210 if (__access_ok(addr, size)) { \
190 211 switch (size) { \
191#define __get_user_nocheck_ret(x,addr,size,type,retval) ({ \ 212 case 1: \
192register unsigned long __gu_val __asm__ ("l1"); \ 213 __get_user_asm_ret(__gu_val, ub, addr, retval); \
193switch (size) { \ 214 break; \
194case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \ 215 case 2: \
195case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \ 216 __get_user_asm_ret(__gu_val, uh, addr, retval); \
196case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \ 217 break; \
197case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \ 218 case 4: \
198default: if (__get_user_bad()) return retval; \ 219 __get_user_asm_ret(__gu_val, , addr, retval); \
199} x = (type) __gu_val; }) 220 break; \
200 221 case 8: \
201#define __get_user_asm(x,size,addr,ret) \ 222 __get_user_asm_ret(__gu_val, d, addr, retval); \
223 break; \
224 default: \
225 if (__get_user_bad()) \
226 return retval; \
227 } \
228 x = (__force type) __gu_val; \
229 } else \
230 return retval; \
231})
232
233#define __get_user_nocheck(x, addr, size, type) ({ \
234 register int __gu_ret; \
235 register unsigned long __gu_val; \
236 switch (size) { \
237 case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
238 case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
239 case 4: __get_user_asm(__gu_val, , addr, __gu_ret); break; \
240 case 8: __get_user_asm(__gu_val, d, addr, __gu_ret); break; \
241 default: \
242 __gu_val = 0; \
243 __gu_ret = __get_user_bad(); \
244 break; \
245 } \
246 x = (__force type) __gu_val; \
247 __gu_ret; \
248})
249
250#define __get_user_nocheck_ret(x, addr, size, type, retval) ({ \
251 register unsigned long __gu_val __asm__ ("l1"); \
252 switch (size) { \
253 case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break; \
254 case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break; \
255 case 4: __get_user_asm_ret(__gu_val, , addr, retval); break; \
256 case 8: __get_user_asm_ret(__gu_val, d, addr, retval); break; \
257 default: \
258 if (__get_user_bad()) \
259 return retval; \
260 } \
261 x = (__force type) __gu_val; \
262})
263
264#define __get_user_asm(x, size, addr, ret) \
202__asm__ __volatile__( \ 265__asm__ __volatile__( \
203 "/* Get user asm, inline. */\n" \ 266 "/* Get user asm, inline. */\n" \
204"1:\t" "ld"#size " %2, %1\n\t" \ 267 "1:\t" "ld"#size " %2, %1\n\t" \
205 "clr %0\n" \ 268 "clr %0\n" \
206"2:\n\n\t" \ 269 "2:\n\n\t" \
207 ".section .fixup,#alloc,#execinstr\n\t" \ 270 ".section .fixup,#alloc,#execinstr\n\t" \
208 ".align 4\n" \ 271 ".align 4\n" \
209"3:\n\t" \ 272 "3:\n\t" \
210 "clr %1\n\t" \ 273 "clr %1\n\t" \
211 "b 2b\n\t" \ 274 "b 2b\n\t" \
212 " mov %3, %0\n\n\t" \ 275 " mov %3, %0\n\n\t" \
213 ".previous\n\t" \ 276 ".previous\n\t" \
214 ".section __ex_table,#alloc\n\t" \ 277 ".section __ex_table,#alloc\n\t" \
215 ".align 4\n\t" \ 278 ".align 4\n\t" \
216 ".word 1b, 3b\n\n\t" \ 279 ".word 1b, 3b\n\n\t" \
217 ".previous\n\t" \ 280 ".previous\n\t" \
218 : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)), \ 281 : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)), \
219 "i" (-EFAULT)) 282 "i" (-EFAULT))
220 283
221#define __get_user_asm_ret(x,size,addr,retval) \ 284#define __get_user_asm_ret(x, size, addr, retval) \
222if (__builtin_constant_p(retval) && retval == -EFAULT) \ 285if (__builtin_constant_p(retval) && retval == -EFAULT) \
223__asm__ __volatile__( \ 286 __asm__ __volatile__( \
224 "/* Get user asm ret, inline. */\n" \ 287 "/* Get user asm ret, inline. */\n" \
225"1:\t" "ld"#size " %1, %0\n\n\t" \ 288 "1:\t" "ld"#size " %1, %0\n\n\t" \
226 ".section __ex_table,#alloc\n\t" \ 289 ".section __ex_table,#alloc\n\t" \
227 ".align 4\n\t" \ 290 ".align 4\n\t" \
228 ".word 1b,__ret_efault\n\n\t" \ 291 ".word 1b,__ret_efault\n\n\t" \
229 ".previous\n\t" \ 292 ".previous\n\t" \
230 : "=&r" (x) : "m" (*__m(addr))); \ 293 : "=&r" (x) : "m" (*__m(addr))); \
231else \ 294else \
232__asm__ __volatile__( \ 295 __asm__ __volatile__( \
233 "/* Get user asm ret, inline. */\n" \ 296 "/* Get user asm ret, inline. */\n" \
234"1:\t" "ld"#size " %1, %0\n\n\t" \ 297 "1:\t" "ld"#size " %1, %0\n\n\t" \
235 ".section .fixup,#alloc,#execinstr\n\t" \ 298 ".section .fixup,#alloc,#execinstr\n\t" \
236 ".align 4\n" \ 299 ".align 4\n" \
237"3:\n\t" \ 300 "3:\n\t" \
238 "ret\n\t" \ 301 "ret\n\t" \
239 " restore %%g0, %2, %%o0\n\n\t" \ 302 " restore %%g0, %2, %%o0\n\n\t" \
240 ".previous\n\t" \ 303 ".previous\n\t" \
241 ".section __ex_table,#alloc\n\t" \ 304 ".section __ex_table,#alloc\n\t" \
242 ".align 4\n\t" \ 305 ".align 4\n\t" \
243 ".word 1b, 3b\n\n\t" \ 306 ".word 1b, 3b\n\n\t" \
244 ".previous\n\t" \ 307 ".previous\n\t" \
245 : "=&r" (x) : "m" (*__m(addr)), "i" (retval)) 308 : "=&r" (x) : "m" (*__m(addr)), "i" (retval))
246 309
247int __get_user_bad(void); 310int __get_user_bad(void);
248 311
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index c990a5e577f0..a35194b7dba0 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -41,11 +41,11 @@
41#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)}) 41#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
42#define get_ds() (KERNEL_DS) 42#define get_ds() (KERNEL_DS)
43 43
44#define segment_eq(a,b) ((a).seg == (b).seg) 44#define segment_eq(a, b) ((a).seg == (b).seg)
45 45
46#define set_fs(val) \ 46#define set_fs(val) \
47do { \ 47do { \
48 current_thread_info()->current_ds =(val).seg; \ 48 current_thread_info()->current_ds = (val).seg; \
49 __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \ 49 __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
50} while(0) 50} while(0)
51 51
@@ -88,121 +88,135 @@ void __retl_efault(void);
88 * of a performance impact. Thus we have a few rather ugly macros here, 88 * of a performance impact. Thus we have a few rather ugly macros here,
89 * and hide all the ugliness from the user. 89 * and hide all the ugliness from the user.
90 */ 90 */
91#define put_user(x,ptr) ({ \ 91#define put_user(x, ptr) ({ \
92unsigned long __pu_addr = (unsigned long)(ptr); \ 92 unsigned long __pu_addr = (unsigned long)(ptr); \
93__chk_user_ptr(ptr); \ 93 __chk_user_ptr(ptr); \
94__put_user_nocheck((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); }) 94 __put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
95})
95 96
96#define get_user(x,ptr) ({ \ 97#define get_user(x, ptr) ({ \
97unsigned long __gu_addr = (unsigned long)(ptr); \ 98 unsigned long __gu_addr = (unsigned long)(ptr); \
98__chk_user_ptr(ptr); \ 99 __chk_user_ptr(ptr); \
99__get_user_nocheck((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); }) 100 __get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
101})
100 102
101#define __put_user(x,ptr) put_user(x,ptr) 103#define __put_user(x, ptr) put_user(x, ptr)
102#define __get_user(x,ptr) get_user(x,ptr) 104#define __get_user(x, ptr) get_user(x, ptr)
103 105
104struct __large_struct { unsigned long buf[100]; }; 106struct __large_struct { unsigned long buf[100]; };
105#define __m(x) ((struct __large_struct *)(x)) 107#define __m(x) ((struct __large_struct *)(x))
106 108
107#define __put_user_nocheck(data,addr,size) ({ \ 109#define __put_user_nocheck(data, addr, size) ({ \
108register int __pu_ret; \ 110 register int __pu_ret; \
109switch (size) { \ 111 switch (size) { \
110case 1: __put_user_asm(data,b,addr,__pu_ret); break; \ 112 case 1: __put_user_asm(data, b, addr, __pu_ret); break; \
111case 2: __put_user_asm(data,h,addr,__pu_ret); break; \ 113 case 2: __put_user_asm(data, h, addr, __pu_ret); break; \
112case 4: __put_user_asm(data,w,addr,__pu_ret); break; \ 114 case 4: __put_user_asm(data, w, addr, __pu_ret); break; \
113case 8: __put_user_asm(data,x,addr,__pu_ret); break; \ 115 case 8: __put_user_asm(data, x, addr, __pu_ret); break; \
114default: __pu_ret = __put_user_bad(); break; \ 116 default: __pu_ret = __put_user_bad(); break; \
115} __pu_ret; }) 117 } \
116 118 __pu_ret; \
117#define __put_user_asm(x,size,addr,ret) \ 119})
120
121#define __put_user_asm(x, size, addr, ret) \
118__asm__ __volatile__( \ 122__asm__ __volatile__( \
119 "/* Put user asm, inline. */\n" \ 123 "/* Put user asm, inline. */\n" \
120"1:\t" "st"#size "a %1, [%2] %%asi\n\t" \ 124 "1:\t" "st"#size "a %1, [%2] %%asi\n\t" \
121 "clr %0\n" \ 125 "clr %0\n" \
122"2:\n\n\t" \ 126 "2:\n\n\t" \
123 ".section .fixup,#alloc,#execinstr\n\t" \ 127 ".section .fixup,#alloc,#execinstr\n\t" \
124 ".align 4\n" \ 128 ".align 4\n" \
125"3:\n\t" \ 129 "3:\n\t" \
126 "sethi %%hi(2b), %0\n\t" \ 130 "sethi %%hi(2b), %0\n\t" \
127 "jmpl %0 + %%lo(2b), %%g0\n\t" \ 131 "jmpl %0 + %%lo(2b), %%g0\n\t" \
128 " mov %3, %0\n\n\t" \ 132 " mov %3, %0\n\n\t" \
129 ".previous\n\t" \ 133 ".previous\n\t" \
130 ".section __ex_table,\"a\"\n\t" \ 134 ".section __ex_table,\"a\"\n\t" \
131 ".align 4\n\t" \ 135 ".align 4\n\t" \
132 ".word 1b, 3b\n\t" \ 136 ".word 1b, 3b\n\t" \
133 ".previous\n\n\t" \ 137 ".previous\n\n\t" \
134 : "=r" (ret) : "r" (x), "r" (__m(addr)), \ 138 : "=r" (ret) : "r" (x), "r" (__m(addr)), \
135 "i" (-EFAULT)) 139 "i" (-EFAULT))
136 140
137int __put_user_bad(void); 141int __put_user_bad(void);
138 142
139#define __get_user_nocheck(data,addr,size,type) ({ \ 143#define __get_user_nocheck(data, addr, size, type) ({ \
140register int __gu_ret; \ 144 register int __gu_ret; \
141register unsigned long __gu_val; \ 145 register unsigned long __gu_val; \
142switch (size) { \ 146 switch (size) { \
143case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \ 147 case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
144case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \ 148 case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
145case 4: __get_user_asm(__gu_val,uw,addr,__gu_ret); break; \ 149 case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
146case 8: __get_user_asm(__gu_val,x,addr,__gu_ret); break; \ 150 case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break; \
147default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \ 151 default: \
148} data = (type) __gu_val; __gu_ret; }) 152 __gu_val = 0; \
149 153 __gu_ret = __get_user_bad(); \
150#define __get_user_nocheck_ret(data,addr,size,type,retval) ({ \ 154 break; \
151register unsigned long __gu_val __asm__ ("l1"); \ 155 } \
152switch (size) { \ 156 data = (__force type) __gu_val; \
153case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \ 157 __gu_ret; \
154case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \ 158})
155case 4: __get_user_asm_ret(__gu_val,uw,addr,retval); break; \ 159
156case 8: __get_user_asm_ret(__gu_val,x,addr,retval); break; \ 160#define __get_user_nocheck_ret(data, addr, size, type, retval) ({ \
157default: if (__get_user_bad()) return retval; \ 161 register unsigned long __gu_val __asm__ ("l1"); \
158} data = (type) __gu_val; }) 162 switch (size) { \
159 163 case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break; \
160#define __get_user_asm(x,size,addr,ret) \ 164 case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break; \
165 case 4: __get_user_asm_ret(__gu_val, uw, addr, retval); break; \
166 case 8: __get_user_asm_ret(__gu_val, x, addr, retval); break; \
167 default: \
168 if (__get_user_bad()) \
169 return retval; \
170 } \
171 data = (__force type) __gu_val; \
172})
173
174#define __get_user_asm(x, size, addr, ret) \
161__asm__ __volatile__( \ 175__asm__ __volatile__( \
162 "/* Get user asm, inline. */\n" \ 176 "/* Get user asm, inline. */\n" \
163"1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \ 177 "1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \
164 "clr %0\n" \ 178 "clr %0\n" \
165"2:\n\n\t" \ 179 "2:\n\n\t" \
166 ".section .fixup,#alloc,#execinstr\n\t" \ 180 ".section .fixup,#alloc,#execinstr\n\t" \
167 ".align 4\n" \ 181 ".align 4\n" \
168"3:\n\t" \ 182 "3:\n\t" \
169 "sethi %%hi(2b), %0\n\t" \ 183 "sethi %%hi(2b), %0\n\t" \
170 "clr %1\n\t" \ 184 "clr %1\n\t" \
171 "jmpl %0 + %%lo(2b), %%g0\n\t" \ 185 "jmpl %0 + %%lo(2b), %%g0\n\t" \
172 " mov %3, %0\n\n\t" \ 186 " mov %3, %0\n\n\t" \
173 ".previous\n\t" \ 187 ".previous\n\t" \
174 ".section __ex_table,\"a\"\n\t" \ 188 ".section __ex_table,\"a\"\n\t" \
175 ".align 4\n\t" \ 189 ".align 4\n\t" \
176 ".word 1b, 3b\n\n\t" \ 190 ".word 1b, 3b\n\n\t" \
177 ".previous\n\t" \ 191 ".previous\n\t" \
178 : "=r" (ret), "=r" (x) : "r" (__m(addr)), \ 192 : "=r" (ret), "=r" (x) : "r" (__m(addr)), \
179 "i" (-EFAULT)) 193 "i" (-EFAULT))
180 194
181#define __get_user_asm_ret(x,size,addr,retval) \ 195#define __get_user_asm_ret(x, size, addr, retval) \
182if (__builtin_constant_p(retval) && retval == -EFAULT) \ 196if (__builtin_constant_p(retval) && retval == -EFAULT) \
183__asm__ __volatile__( \ 197 __asm__ __volatile__( \
184 "/* Get user asm ret, inline. */\n" \ 198 "/* Get user asm ret, inline. */\n" \
185"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \ 199 "1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
186 ".section __ex_table,\"a\"\n\t" \ 200 ".section __ex_table,\"a\"\n\t" \
187 ".align 4\n\t" \ 201 ".align 4\n\t" \
188 ".word 1b,__ret_efault\n\n\t" \ 202 ".word 1b,__ret_efault\n\n\t" \
189 ".previous\n\t" \ 203 ".previous\n\t" \
190 : "=r" (x) : "r" (__m(addr))); \ 204 : "=r" (x) : "r" (__m(addr))); \
191else \ 205else \
192__asm__ __volatile__( \ 206 __asm__ __volatile__( \
193 "/* Get user asm ret, inline. */\n" \ 207 "/* Get user asm ret, inline. */\n" \
194"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \ 208 "1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
195 ".section .fixup,#alloc,#execinstr\n\t" \ 209 ".section .fixup,#alloc,#execinstr\n\t" \
196 ".align 4\n" \ 210 ".align 4\n" \
197"3:\n\t" \ 211 "3:\n\t" \
198 "ret\n\t" \ 212 "ret\n\t" \
199 " restore %%g0, %2, %%o0\n\n\t" \ 213 " restore %%g0, %2, %%o0\n\n\t" \
200 ".previous\n\t" \ 214 ".previous\n\t" \
201 ".section __ex_table,\"a\"\n\t" \ 215 ".section __ex_table,\"a\"\n\t" \
202 ".align 4\n\t" \ 216 ".align 4\n\t" \
203 ".word 1b, 3b\n\n\t" \ 217 ".word 1b, 3b\n\n\t" \
204 ".previous\n\t" \ 218 ".previous\n\t" \
205 : "=r" (x) : "r" (__m(addr)), "i" (retval)) 219 : "=r" (x) : "r" (__m(addr)), "i" (retval))
206 220
207int __get_user_bad(void); 221int __get_user_bad(void);
208 222
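Most of the sh and sparc uaccess churn is whitespace, but the recurring substantive change is the (__force type) cast applied to __gu_val. A one-function sketch of what the annotation buys under sparse (the __le32 target type is illustrative):

    #include <linux/compiler.h>
    #include <linux/types.h>

    static __le32 retype(unsigned long __gu_val)
    {
            /* __gu_val is always fetched as a plain unsigned long; casting
             * it to a bitwise type such as __le32 warns under sparse unless
             * the conversion is marked deliberate with __force. */
            return (__force __le32)__gu_val;
    }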
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index eb1cf898ed3c..c2fb8a87dccb 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -488,6 +488,22 @@ config X86_INTEL_MID
488 Intel MID platforms are based on an Intel processor and chipset which 488 Intel MID platforms are based on an Intel processor and chipset which
489 consume less power than most of the x86 derivatives. 489 consume less power than most of the x86 derivatives.
490 490
491config X86_INTEL_QUARK
492 bool "Intel Quark platform support"
493 depends on X86_32
494 depends on X86_EXTENDED_PLATFORM
495 depends on X86_PLATFORM_DEVICES
496 depends on X86_TSC
497 depends on PCI
498 depends on PCI_GOANY
499 depends on X86_IO_APIC
500 select IOSF_MBI
501 select INTEL_IMR
502 ---help---
503 Select to include support for Quark X1000 SoC.
504 Say Y here if you have a Quark based system such as the Arduino
505 compatible Intel Galileo.
506
491config X86_INTEL_LPSS 507config X86_INTEL_LPSS
492 bool "Intel Low Power Subsystem Support" 508 bool "Intel Low Power Subsystem Support"
493 depends on ACPI 509 depends on ACPI
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 61bd2ad94281..20028da8ae18 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -313,6 +313,19 @@ config DEBUG_NMI_SELFTEST
313 313
314 If unsure, say N. 314 If unsure, say N.
315 315
316config DEBUG_IMR_SELFTEST
317 bool "Isolated Memory Region self test"
318 default n
319 depends on INTEL_IMR
320 ---help---
321 This option enables automated sanity testing of the IMR code.
322 Some simple tests are run to verify IMR bounds checking, alignment
323 and overlapping. This option is really only useful if you are
324 debugging an IMR memory map or are modifying the IMR code and want to
325 test your changes.
326
327 If unsure say N here.
328
316config X86_DEBUG_STATIC_CPU_HAS 329config X86_DEBUG_STATIC_CPU_HAS
317 bool "Debug alternatives" 330 bool "Debug alternatives"
318 depends on DEBUG_KERNEL 331 depends on DEBUG_KERNEL
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
index 36b62bc52638..95eba554baf9 100644
--- a/arch/x86/Makefile.um
+++ b/arch/x86/Makefile.um
@@ -30,7 +30,7 @@ cflags-y += -ffreestanding
30# Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use 30# Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
31# a lot more stack due to the lack of sharing of stacklots. Also, gcc 31# a lot more stack due to the lack of sharing of stacklots. Also, gcc
32# 4.3.0 needs -funit-at-a-time for extern inline functions. 32# 4.3.0 needs -funit-at-a-time for extern inline functions.
33KBUILD_CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then \ 33KBUILD_CFLAGS += $(shell if [ $(cc-version) -lt 0400 ] ; then \
34 echo $(call cc-option,-fno-unit-at-a-time); \ 34 echo $(call cc-option,-fno-unit-at-a-time); \
35 else echo $(call cc-option,-funit-at-a-time); fi ;) 35 else echo $(call cc-option,-funit-at-a-time); fi ;)
36 36
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 843feb3eb20b..0a291cdfaf77 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -51,6 +51,7 @@ $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
51 51
52vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \ 52vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \
53 $(objtree)/drivers/firmware/efi/libstub/lib.a 53 $(objtree)/drivers/firmware/efi/libstub/lib.a
54vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
54 55
55$(obj)/vmlinux: $(vmlinux-objs-y) FORCE 56$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
56 $(call if_changed,ld) 57 $(call if_changed,ld)
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index bb1376381985..7083c16cccba 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -14,6 +14,13 @@
14static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" 14static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
15 LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; 15 LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
16 16
17struct kaslr_setup_data {
18 __u64 next;
19 __u32 type;
20 __u32 len;
21 __u8 data[1];
22} kaslr_setup_data;
23
17#define I8254_PORT_CONTROL 0x43 24#define I8254_PORT_CONTROL 0x43
18#define I8254_PORT_COUNTER0 0x40 25#define I8254_PORT_COUNTER0 0x40
19#define I8254_CMD_READBACK 0xC0 26#define I8254_CMD_READBACK 0xC0
@@ -295,7 +302,29 @@ static unsigned long find_random_addr(unsigned long minimum,
295 return slots_fetch_random(); 302 return slots_fetch_random();
296} 303}
297 304
298unsigned char *choose_kernel_location(unsigned char *input, 305static void add_kaslr_setup_data(struct boot_params *params, __u8 enabled)
306{
307 struct setup_data *data;
308
309 kaslr_setup_data.type = SETUP_KASLR;
310 kaslr_setup_data.len = 1;
311 kaslr_setup_data.next = 0;
312 kaslr_setup_data.data[0] = enabled;
313
314 data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
315
316 while (data && data->next)
317 data = (struct setup_data *)(unsigned long)data->next;
318
319 if (data)
320 data->next = (unsigned long)&kaslr_setup_data;
321 else
322 params->hdr.setup_data = (unsigned long)&kaslr_setup_data;
323
324}
325
326unsigned char *choose_kernel_location(struct boot_params *params,
327 unsigned char *input,
299 unsigned long input_size, 328 unsigned long input_size,
300 unsigned char *output, 329 unsigned char *output,
301 unsigned long output_size) 330 unsigned long output_size)
@@ -306,14 +335,17 @@ unsigned char *choose_kernel_location(unsigned char *input,
306#ifdef CONFIG_HIBERNATION 335#ifdef CONFIG_HIBERNATION
307 if (!cmdline_find_option_bool("kaslr")) { 336 if (!cmdline_find_option_bool("kaslr")) {
308 debug_putstr("KASLR disabled by default...\n"); 337 debug_putstr("KASLR disabled by default...\n");
338 add_kaslr_setup_data(params, 0);
309 goto out; 339 goto out;
310 } 340 }
311#else 341#else
312 if (cmdline_find_option_bool("nokaslr")) { 342 if (cmdline_find_option_bool("nokaslr")) {
313 debug_putstr("KASLR disabled by cmdline...\n"); 343 debug_putstr("KASLR disabled by cmdline...\n");
344 add_kaslr_setup_data(params, 0);
314 goto out; 345 goto out;
315 } 346 }
316#endif 347#endif
348 add_kaslr_setup_data(params, 1);
317 349
318 /* Record the various known unsafe memory ranges. */ 350 /* Record the various known unsafe memory ranges. */
319 mem_avoid_init((unsigned long)input, input_size, 351 mem_avoid_init((unsigned long)input, input_size,
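add_kaslr_setup_data() is a plain append to the boot_params setup_data singly linked list. The same walk in a standalone sketch (pointer types simplified; the kernel stores the links as u64 physical addresses):

    struct node {                   /* stand-in for struct setup_data */
            struct node *next;
    };

    static void list_append(struct node **head, struct node *item)
    {
            struct node *n = *head;

            item->next = NULL;
            if (!n) {               /* empty list: item becomes the head */
                    *head = item;
                    return;
            }
            while (n->next)         /* walk to the current tail... */
                    n = n->next;
            n->next = item;         /* ...and hang the new entry there */
    }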
diff --git a/arch/x86/boot/compressed/efi_stub_64.S b/arch/x86/boot/compressed/efi_stub_64.S
index 7ff3632806b1..99494dff2113 100644
--- a/arch/x86/boot/compressed/efi_stub_64.S
+++ b/arch/x86/boot/compressed/efi_stub_64.S
@@ -3,28 +3,3 @@
3#include <asm/processor-flags.h> 3#include <asm/processor-flags.h>
4 4
5#include "../../platform/efi/efi_stub_64.S" 5#include "../../platform/efi/efi_stub_64.S"
6
7#ifdef CONFIG_EFI_MIXED
8 .code64
9 .text
10ENTRY(efi64_thunk)
11 push %rbp
12 push %rbx
13
14 subq $16, %rsp
15 leaq efi_exit32(%rip), %rax
16 movl %eax, 8(%rsp)
17 leaq efi_gdt64(%rip), %rax
18 movl %eax, 4(%rsp)
19 movl %eax, 2(%rax) /* Fixup the gdt base address */
20 leaq efi32_boot_gdt(%rip), %rax
21 movl %eax, (%rsp)
22
23 call __efi64_thunk
24
25 addq $16, %rsp
26 pop %rbx
27 pop %rbp
28 ret
29ENDPROC(efi64_thunk)
30#endif /* CONFIG_EFI_MIXED */
diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
new file mode 100644
index 000000000000..630384a4c14a
--- /dev/null
+++ b/arch/x86/boot/compressed/efi_thunk_64.S
@@ -0,0 +1,196 @@
1/*
2 * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming
3 *
4 * Early support for invoking 32-bit EFI services from a 64-bit kernel.
5 *
6 * Because this thunking occurs before ExitBootServices() we have to
 7 * restore the firmware's 32-bit GDT before we make EFI service calls,
8 * since the firmware's 32-bit IDT is still currently installed and it
9 * needs to be able to service interrupts.
10 *
11 * On the plus side, we don't have to worry about mangling 64-bit
12 * addresses into 32-bits because we're executing with an identity
13 * mapped pagetable and haven't transitioned to 64-bit virtual addresses
14 * yet.
15 */
16
17#include <linux/linkage.h>
18#include <asm/msr.h>
19#include <asm/page_types.h>
20#include <asm/processor-flags.h>
21#include <asm/segment.h>
22
23 .code64
24 .text
25ENTRY(efi64_thunk)
26 push %rbp
27 push %rbx
28
29 subq $8, %rsp
30 leaq efi_exit32(%rip), %rax
31 movl %eax, 4(%rsp)
32 leaq efi_gdt64(%rip), %rax
33 movl %eax, (%rsp)
34 movl %eax, 2(%rax) /* Fixup the gdt base address */
35
36 movl %ds, %eax
37 push %rax
38 movl %es, %eax
39 push %rax
40 movl %ss, %eax
41 push %rax
42
43 /*
44 * Convert x86-64 ABI params to i386 ABI
45 */
46 subq $32, %rsp
47 movl %esi, 0x0(%rsp)
48 movl %edx, 0x4(%rsp)
49 movl %ecx, 0x8(%rsp)
50 movq %r8, %rsi
51 movl %esi, 0xc(%rsp)
52 movq %r9, %rsi
53 movl %esi, 0x10(%rsp)
54
55 sgdt save_gdt(%rip)
56
57 leaq 1f(%rip), %rbx
58 movq %rbx, func_rt_ptr(%rip)
59
60 /*
61 * Switch to gdt with 32-bit segments. This is the firmware GDT
62 * that was installed when the kernel started executing. This
63 * pointer was saved at the EFI stub entry point in head_64.S.
64 */
65 leaq efi32_boot_gdt(%rip), %rax
66 lgdt (%rax)
67
68 pushq $__KERNEL_CS
69 leaq efi_enter32(%rip), %rax
70 pushq %rax
71 lretq
72
731: addq $32, %rsp
74
75 lgdt save_gdt(%rip)
76
77 pop %rbx
78 movl %ebx, %ss
79 pop %rbx
80 movl %ebx, %es
81 pop %rbx
82 movl %ebx, %ds
83
84 /*
85 * Convert 32-bit status code into 64-bit.
86 */
87 test %rax, %rax
88 jz 1f
89 movl %eax, %ecx
90 andl $0x0fffffff, %ecx
91 andl $0xf0000000, %eax
92 shl $32, %rax
93 or %rcx, %rax
941:
95 addq $8, %rsp
96 pop %rbx
97 pop %rbp
98 ret
99ENDPROC(efi64_thunk)
100
101ENTRY(efi_exit32)
102 movq func_rt_ptr(%rip), %rax
103 push %rax
104 mov %rdi, %rax
105 ret
106ENDPROC(efi_exit32)
107
108 .code32
109/*
110 * EFI service pointer must be in %edi.
111 *
112 * The stack should represent the 32-bit calling convention.
113 */
114ENTRY(efi_enter32)
115 movl $__KERNEL_DS, %eax
116 movl %eax, %ds
117 movl %eax, %es
118 movl %eax, %ss
119
120 /* Reload pgtables */
121 movl %cr3, %eax
122 movl %eax, %cr3
123
124 /* Disable paging */
125 movl %cr0, %eax
126 btrl $X86_CR0_PG_BIT, %eax
127 movl %eax, %cr0
128
129 /* Disable long mode via EFER */
130 movl $MSR_EFER, %ecx
131 rdmsr
132 btrl $_EFER_LME, %eax
133 wrmsr
134
135 call *%edi
136
137 /* We must preserve return value */
138 movl %eax, %edi
139
140 /*
141 * Some firmware will return with interrupts enabled. Be sure to
142 * disable them before we switch GDTs.
143 */
144 cli
145
146 movl 56(%esp), %eax
147 movl %eax, 2(%eax)
148 lgdtl (%eax)
149
150 movl %cr4, %eax
151 btsl $(X86_CR4_PAE_BIT), %eax
152 movl %eax, %cr4
153
154 movl %cr3, %eax
155 movl %eax, %cr3
156
157 movl $MSR_EFER, %ecx
158 rdmsr
159 btsl $_EFER_LME, %eax
160 wrmsr
161
162 xorl %eax, %eax
163 lldt %ax
164
165 movl 60(%esp), %eax
166 pushl $__KERNEL_CS
167 pushl %eax
168
169 /* Enable paging */
170 movl %cr0, %eax
171 btsl $X86_CR0_PG_BIT, %eax
172 movl %eax, %cr0
173 lret
174ENDPROC(efi_enter32)
175
176 .data
177 .balign 8
178 .global efi32_boot_gdt
179efi32_boot_gdt: .word 0
180 .quad 0
181
182save_gdt: .word 0
183 .quad 0
184func_rt_ptr: .quad 0
185
186 .global efi_gdt64
187efi_gdt64:
188 .word efi_gdt64_end - efi_gdt64
189 .long 0 /* Filled out by user */
190 .word 0
191 .quad 0x0000000000000000 /* NULL descriptor */
192 .quad 0x00af9a000000ffff /* __KERNEL_CS */
193 .quad 0x00cf92000000ffff /* __KERNEL_DS */
194 .quad 0x0080890000000000 /* TS descriptor */
195 .quad 0x0000000000000000 /* TS continued */
196efi_gdt64_end:
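The subtlest part of the thunk is the status-code widening after label 1. The same transform in C, assuming the EFI convention that a 32-bit status keeps its error flag in the top nibble while the 64-bit encoding carries it in the top nibble of a 64-bit word:

    static unsigned long long efi_status_32_to_64(unsigned int s32)
    {
            unsigned long long code = s32 & 0x0fffffffu;
            unsigned long long top  = (unsigned long long)(s32 & 0xf0000000u) << 32;

            return top | code;  /* mirrors the andl/andl/shl $32/or sequence */
    }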
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index a950864a64da..5903089c818f 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -401,7 +401,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
401 * the entire decompressed kernel plus relocation table, or the 401 * the entire decompressed kernel plus relocation table, or the
402 * entire decompressed kernel plus .bss and .brk sections. 402 * entire decompressed kernel plus .bss and .brk sections.
403 */ 403 */
404 output = choose_kernel_location(input_data, input_len, output, 404 output = choose_kernel_location(real_mode, input_data, input_len,
405 output,
405 output_len > run_size ? output_len 406 output_len > run_size ? output_len
406 : run_size); 407 : run_size);
407 408
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 04477d68403f..ee3576b2666b 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -57,7 +57,8 @@ int cmdline_find_option_bool(const char *option);
57 57
58#if CONFIG_RANDOMIZE_BASE 58#if CONFIG_RANDOMIZE_BASE
59/* aslr.c */ 59/* aslr.c */
60unsigned char *choose_kernel_location(unsigned char *input, 60unsigned char *choose_kernel_location(struct boot_params *params,
61 unsigned char *input,
61 unsigned long input_size, 62 unsigned long input_size,
62 unsigned char *output, 63 unsigned char *output,
63 unsigned long output_size); 64 unsigned long output_size);
@@ -65,7 +66,8 @@ unsigned char *choose_kernel_location(unsigned char *input,
65bool has_cpuflag(int flag); 66bool has_cpuflag(int flag);
66#else 67#else
67static inline 68static inline
68unsigned char *choose_kernel_location(unsigned char *input, 69unsigned char *choose_kernel_location(struct boot_params *params,
70 unsigned char *input,
69 unsigned long input_size, 71 unsigned long input_size,
70 unsigned char *output, 72 unsigned char *output,
71 unsigned long output_size) 73 unsigned long output_size)
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 92003f3c8a42..efc3b22d896e 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -213,7 +213,15 @@ void register_lapic_address(unsigned long address);
213extern void setup_boot_APIC_clock(void); 213extern void setup_boot_APIC_clock(void);
214extern void setup_secondary_APIC_clock(void); 214extern void setup_secondary_APIC_clock(void);
215extern int APIC_init_uniprocessor(void); 215extern int APIC_init_uniprocessor(void);
216
217#ifdef CONFIG_X86_64
218static inline int apic_force_enable(unsigned long addr)
219{
220 return -1;
221}
222#else
216extern int apic_force_enable(unsigned long addr); 223extern int apic_force_enable(unsigned long addr);
224#endif
217 225
218extern int apic_bsp_setup(bool upmode); 226extern int apic_bsp_setup(bool upmode);
219extern void apic_ap_setup(void); 227extern void apic_ap_setup(void);
diff --git a/arch/x86/include/asm/imr.h b/arch/x86/include/asm/imr.h
new file mode 100644
index 000000000000..cd2ce4068441
--- /dev/null
+++ b/arch/x86/include/asm/imr.h
@@ -0,0 +1,60 @@
1/*
2 * imr.h: Isolated Memory Region API
3 *
4 * Copyright(c) 2013 Intel Corporation.
5 * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; version 2
10 * of the License.
11 */
12#ifndef _IMR_H
13#define _IMR_H
14
15#include <linux/types.h>
16
17/*
18 * IMR agent access mask bits
19 * See section 12.7.4.7 from quark-x1000-datasheet.pdf for register
20 * definitions.
21 */
22#define IMR_ESRAM_FLUSH BIT(31)
23#define IMR_CPU_SNOOP BIT(30) /* Applicable only to write */
24#define IMR_RMU BIT(29)
25#define IMR_VC1_SAI_ID3 BIT(15)
26#define IMR_VC1_SAI_ID2 BIT(14)
27#define IMR_VC1_SAI_ID1 BIT(13)
28#define IMR_VC1_SAI_ID0 BIT(12)
29#define IMR_VC0_SAI_ID3 BIT(11)
30#define IMR_VC0_SAI_ID2 BIT(10)
31#define IMR_VC0_SAI_ID1 BIT(9)
32#define IMR_VC0_SAI_ID0 BIT(8)
33#define IMR_CPU_0 BIT(1) /* SMM mode */
34#define IMR_CPU BIT(0) /* Non SMM mode */
35#define IMR_ACCESS_NONE 0
36
37/*
38 * Read/Write access-all bits here include some reserved bits
39 * These are the values firmware uses and are accepted by hardware.
40 * The kernel defines read/write access-all in the same way as firmware
41 * in order to have a consistent and crisp definition across firmware,
42 * bootloader and kernel.
43 */
44#define IMR_READ_ACCESS_ALL 0xBFFFFFFF
45#define IMR_WRITE_ACCESS_ALL 0xFFFFFFFF
46
47/* Number of IMRs provided by Quark X1000 SoC */
48#define QUARK_X1000_IMR_MAX 0x08
49#define QUARK_X1000_IMR_REGBASE 0x40
50
51/* IMR alignment bits - only bits 31:10 are checked for IMR validity */
52#define IMR_ALIGN 0x400
53#define IMR_MASK (IMR_ALIGN - 1)
54
55int imr_add_range(phys_addr_t base, size_t size,
56 unsigned int rmask, unsigned int wmask, bool lock);
57
58int imr_remove_range(phys_addr_t base, size_t size);
59
60#endif /* _IMR_H */
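
The new header above is the whole kernel-facing IMR API. A minimal sketch of a hypothetical caller (the function name here is invented for illustration) that fences off one page for CPU-only access, using the mask bits defined above:

#include <asm/imr.h>

/* Hypothetical example, not part of the patch: allow only CPU reads
 * and writes (SMM and non-SMM) to one 4 KiB region.  base and size
 * must be IMR_ALIGN (1 KiB) granular, since only bits 31:10 count. */
static int __init example_protect_page(phys_addr_t base)
{
	int ret;

	ret = imr_add_range(base, 4096,
			    IMR_CPU | IMR_CPU_0,	/* read mask  */
			    IMR_CPU | IMR_CPU_0,	/* write mask */
			    false);			/* don't lock */
	if (ret)
		return ret;

	/* An unlocked IMR can be torn down again later. */
	return imr_remove_range(base, 4096);
}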
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index 879fd7d33877..ef01fef3eebc 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -16,7 +16,6 @@
16#define LHCALL_SET_PTE 14 16#define LHCALL_SET_PTE 14
17#define LHCALL_SET_PGD 15 17#define LHCALL_SET_PGD 15
18#define LHCALL_LOAD_TLS 16 18#define LHCALL_LOAD_TLS 16
19#define LHCALL_NOTIFY 17
20#define LHCALL_LOAD_GDT_ENTRY 18 19#define LHCALL_LOAD_GDT_ENTRY 18
21#define LHCALL_SEND_INTERRUPTS 19 20#define LHCALL_SEND_INTERRUPTS 19
22 21
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index f97fbe3abb67..95e11f79f123 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -51,6 +51,8 @@ extern int devmem_is_allowed(unsigned long pagenr);
51extern unsigned long max_low_pfn_mapped; 51extern unsigned long max_low_pfn_mapped;
52extern unsigned long max_pfn_mapped; 52extern unsigned long max_pfn_mapped;
53 53
54extern bool kaslr_enabled;
55
54static inline phys_addr_t get_max_mapped(void) 56static inline phys_addr_t get_max_mapped(void)
55{ 57{
56 return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT; 58 return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 67fc3d2b0aab..a0c35bf6cb92 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -476,12 +476,14 @@ static inline int pmd_present(pmd_t pmd)
476 */ 476 */
477static inline int pte_protnone(pte_t pte) 477static inline int pte_protnone(pte_t pte)
478{ 478{
479 return pte_flags(pte) & _PAGE_PROTNONE; 479 return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
480 == _PAGE_PROTNONE;
480} 481}
481 482
482static inline int pmd_protnone(pmd_t pmd) 483static inline int pmd_protnone(pmd_t pmd)
483{ 484{
484 return pmd_flags(pmd) & _PAGE_PROTNONE; 485 return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
486 == _PAGE_PROTNONE;
485} 487}
486#endif /* CONFIG_NUMA_BALANCING */ 488#endif /* CONFIG_NUMA_BALANCING */
487 489
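
The fix above matters because _PAGE_PROTNONE aliases the global bit, so a present global mapping would otherwise be misreported as PROT_NONE. A userspace model of the corrected predicate (flag values assumed to mirror the x86 definitions):

#include <stdio.h>

#define _PAGE_PRESENT  0x001UL
#define _PAGE_PROTNONE 0x100UL	/* shares the global bit */

static int pte_protnone(unsigned long flags)
{
	/* protnone only when the bit is set AND the entry is not present */
	return (flags & (_PAGE_PROTNONE | _PAGE_PRESENT)) == _PAGE_PROTNONE;
}

int main(void)
{
	printf("%d\n", pte_protnone(_PAGE_PROTNONE));			/* 1 */
	printf("%d\n", pte_protnone(_PAGE_PROTNONE | _PAGE_PRESENT));	/* 0: present global page */
	return 0;
}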
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 7050d864f520..cf87de3fc390 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -46,7 +46,7 @@ static __always_inline bool static_key_false(struct static_key *key);
46 46
47static inline void __ticket_enter_slowpath(arch_spinlock_t *lock) 47static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
48{ 48{
49 set_bit(0, (volatile unsigned long *)&lock->tickets.tail); 49 set_bit(0, (volatile unsigned long *)&lock->tickets.head);
50} 50}
51 51
52#else /* !CONFIG_PARAVIRT_SPINLOCKS */ 52#else /* !CONFIG_PARAVIRT_SPINLOCKS */
@@ -60,10 +60,30 @@ static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
60} 60}
61 61
62#endif /* CONFIG_PARAVIRT_SPINLOCKS */ 62#endif /* CONFIG_PARAVIRT_SPINLOCKS */
63static inline int __tickets_equal(__ticket_t one, __ticket_t two)
64{
65 return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
66}
67
68static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock,
69 __ticket_t head)
70{
71 if (head & TICKET_SLOWPATH_FLAG) {
72 arch_spinlock_t old, new;
73
74 old.tickets.head = head;
75 new.tickets.head = head & ~TICKET_SLOWPATH_FLAG;
76 old.tickets.tail = new.tickets.head + TICKET_LOCK_INC;
77 new.tickets.tail = old.tickets.tail;
78
79 /* try to clear slowpath flag when there are no contenders */
80 cmpxchg(&lock->head_tail, old.head_tail, new.head_tail);
81 }
82}
63 83
64static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) 84static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
65{ 85{
66 return lock.tickets.head == lock.tickets.tail; 86 return __tickets_equal(lock.tickets.head, lock.tickets.tail);
67} 87}
68 88
69/* 89/*
@@ -87,18 +107,21 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
87 if (likely(inc.head == inc.tail)) 107 if (likely(inc.head == inc.tail))
88 goto out; 108 goto out;
89 109
90 inc.tail &= ~TICKET_SLOWPATH_FLAG;
91 for (;;) { 110 for (;;) {
92 unsigned count = SPIN_THRESHOLD; 111 unsigned count = SPIN_THRESHOLD;
93 112
94 do { 113 do {
95 if (READ_ONCE(lock->tickets.head) == inc.tail) 114 inc.head = READ_ONCE(lock->tickets.head);
96 goto out; 115 if (__tickets_equal(inc.head, inc.tail))
116 goto clear_slowpath;
97 cpu_relax(); 117 cpu_relax();
98 } while (--count); 118 } while (--count);
99 __ticket_lock_spinning(lock, inc.tail); 119 __ticket_lock_spinning(lock, inc.tail);
100 } 120 }
101out: barrier(); /* make sure nothing creeps before the lock is taken */ 121clear_slowpath:
122 __ticket_check_and_clear_slowpath(lock, inc.head);
123out:
124 barrier(); /* make sure nothing creeps before the lock is taken */
102} 125}
103 126
104static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) 127static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -106,56 +129,30 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
106 arch_spinlock_t old, new; 129 arch_spinlock_t old, new;
107 130
108 old.tickets = READ_ONCE(lock->tickets); 131 old.tickets = READ_ONCE(lock->tickets);
109 if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG)) 132 if (!__tickets_equal(old.tickets.head, old.tickets.tail))
110 return 0; 133 return 0;
111 134
112 new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT); 135 new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
136 new.head_tail &= ~TICKET_SLOWPATH_FLAG;
113 137
114 /* cmpxchg is a full barrier, so nothing can move before it */ 138 /* cmpxchg is a full barrier, so nothing can move before it */
115 return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail; 139 return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
116} 140}
117 141
118static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
119 arch_spinlock_t old)
120{
121 arch_spinlock_t new;
122
123 BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
124
125 /* Perform the unlock on the "before" copy */
126 old.tickets.head += TICKET_LOCK_INC;
127
128 /* Clear the slowpath flag */
129 new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);
130
131 /*
132 * If the lock is uncontended, clear the flag - use cmpxchg in
133 * case it changes behind our back though.
134 */
135 if (new.tickets.head != new.tickets.tail ||
136 cmpxchg(&lock->head_tail, old.head_tail,
137 new.head_tail) != old.head_tail) {
138 /*
139 * Lock still has someone queued for it, so wake up an
140 * appropriate waiter.
141 */
142 __ticket_unlock_kick(lock, old.tickets.head);
143 }
144}
145
146static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) 142static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
147{ 143{
148 if (TICKET_SLOWPATH_FLAG && 144 if (TICKET_SLOWPATH_FLAG &&
149 static_key_false(&paravirt_ticketlocks_enabled)) { 145 static_key_false(&paravirt_ticketlocks_enabled)) {
150 arch_spinlock_t prev; 146 __ticket_t head;
151 147
152 prev = *lock; 148 BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
153 add_smp(&lock->tickets.head, TICKET_LOCK_INC);
154 149
155 /* add_smp() is a full mb() */ 150 head = xadd(&lock->tickets.head, TICKET_LOCK_INC);
156 151
157 if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG)) 152 if (unlikely(head & TICKET_SLOWPATH_FLAG)) {
158 __ticket_unlock_slowpath(lock, prev); 153 head &= ~TICKET_SLOWPATH_FLAG;
154 __ticket_unlock_kick(lock, (head + TICKET_LOCK_INC));
155 }
159 } else 156 } else
160 __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX); 157 __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
161} 158}
@@ -164,14 +161,15 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
164{ 161{
165 struct __raw_tickets tmp = READ_ONCE(lock->tickets); 162 struct __raw_tickets tmp = READ_ONCE(lock->tickets);
166 163
167 return tmp.tail != tmp.head; 164 return !__tickets_equal(tmp.tail, tmp.head);
168} 165}
169 166
170static inline int arch_spin_is_contended(arch_spinlock_t *lock) 167static inline int arch_spin_is_contended(arch_spinlock_t *lock)
171{ 168{
172 struct __raw_tickets tmp = READ_ONCE(lock->tickets); 169 struct __raw_tickets tmp = READ_ONCE(lock->tickets);
173 170
174 return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC; 171 tmp.head &= ~TICKET_SLOWPATH_FLAG;
172 return (tmp.tail - tmp.head) > TICKET_LOCK_INC;
175} 173}
176#define arch_spin_is_contended arch_spin_is_contended 174#define arch_spin_is_contended arch_spin_is_contended
177 175
@@ -191,8 +189,8 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
191 * We need to check "unlocked" in a loop, tmp.head == head 189 * We need to check "unlocked" in a loop, tmp.head == head
192 * can be false positive because of overflow. 190 * can be false positive because of overflow.
193 */ 191 */
194 if (tmp.head == (tmp.tail & ~TICKET_SLOWPATH_FLAG) || 192 if (__tickets_equal(tmp.head, tmp.tail) ||
195 tmp.head != head) 193 !__tickets_equal(tmp.head, head))
196 break; 194 break;
197 195
198 cpu_relax(); 196 cpu_relax();
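
The new __tickets_equal() helper carries most of the hunk above: it compares two tickets while ignoring TICKET_SLOWPATH_FLAG, which either copy may carry now that the flag lives in the head. A standalone model (8-bit tickets and flag value assumed):

#include <stdio.h>

typedef unsigned char ticket_t;
#define TICKET_SLOWPATH_FLAG	((ticket_t)1)

static int tickets_equal(ticket_t one, ticket_t two)
{
	/* equal modulo the slowpath flag */
	return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
}

int main(void)
{
	printf("%d\n", tickets_equal(0x08, 0x08));			  /* 1 */
	printf("%d\n", tickets_equal(0x08 | TICKET_SLOWPATH_FLAG, 0x08)); /* 1 */
	printf("%d\n", tickets_equal(0x08, 0x0a));			  /* 0 */
	return 0;
}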
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 0d592e0a5b84..ace9dec050b1 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -179,7 +179,7 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
179 asm volatile("call __get_user_%P3" \ 179 asm volatile("call __get_user_%P3" \
180 : "=a" (__ret_gu), "=r" (__val_gu) \ 180 : "=a" (__ret_gu), "=r" (__val_gu) \
181 : "0" (ptr), "i" (sizeof(*(ptr)))); \ 181 : "0" (ptr), "i" (sizeof(*(ptr)))); \
182 (x) = (__typeof__(*(ptr))) __val_gu; \ 182 (x) = (__force __typeof__(*(ptr))) __val_gu; \
183 __ret_gu; \ 183 __ret_gu; \
184}) 184})
185 185
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 225b0988043a..44e6dd7e36a2 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -7,6 +7,7 @@
7#define SETUP_DTB 2 7#define SETUP_DTB 2
8#define SETUP_PCI 3 8#define SETUP_PCI 3
9#define SETUP_EFI 4 9#define SETUP_EFI 4
10#define SETUP_KASLR 5
10 11
11/* ram_size flags */ 12/* ram_size flags */
12#define RAMDISK_IMAGE_START_MASK 0x07FF 13#define RAMDISK_IMAGE_START_MASK 0x07FF
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index ae97ed0873c6..3d525c6124f6 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -613,6 +613,11 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
613{ 613{
614 int rc, irq, trigger, polarity; 614 int rc, irq, trigger, polarity;
615 615
616 if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
617 *irqp = gsi;
618 return 0;
619 }
620
616 rc = acpi_get_override_irq(gsi, &trigger, &polarity); 621 rc = acpi_get_override_irq(gsi, &trigger, &polarity);
617 if (rc == 0) { 622 if (rc == 0) {
618 trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; 623 trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index b5c8ff5e9dfc..2346c95c6ab1 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1396,6 +1396,12 @@ void cpu_init(void)
1396 1396
1397 wait_for_master_cpu(cpu); 1397 wait_for_master_cpu(cpu);
1398 1398
1399 /*
1400 * Initialize the CR4 shadow before doing anything that could
1401 * try to read it.
1402 */
1403 cr4_init_shadow();
1404
1399 show_ucode_info_early(); 1405 show_ucode_info_early();
1400 1406
1401 printk(KERN_INFO "Initializing CPU#%d\n", cpu); 1407 printk(KERN_INFO "Initializing CPU#%d\n", cpu);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 94d7dcb12145..50163fa9034f 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -565,8 +565,8 @@ static const struct _tlb_table intel_tlb_table[] = {
565 { 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" }, 565 { 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" },
566 { 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" }, 566 { 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" },
567 { 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" }, 567 { 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" },
568 { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set ssociative" }, 568 { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set associative" },
569 { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set ssociative" }, 569 { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set associative" },
570 { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" }, 570 { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" },
571 { 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" }, 571 { 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
572 { 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" }, 572 { 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" },
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index c6826d1e8082..746e7fd08aad 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -196,6 +196,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
196 struct microcode_header_intel mc_header; 196 struct microcode_header_intel mc_header;
197 unsigned int mc_size; 197 unsigned int mc_size;
198 198
199 if (leftover < sizeof(mc_header)) {
200 pr_err("error! Truncated header in microcode data file\n");
201 break;
202 }
203
199 if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header))) 204 if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
200 break; 205 break;
201 206
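
The added check is the standard guard for length-prefixed records: never read a header unless the remaining byte count can hold one. The general shape, sketched with stand-in types rather than the microcode structures:

#include <stddef.h>
#include <string.h>

struct rec_header { unsigned int totalsize; };

static int parse_records(const unsigned char *p, size_t leftover)
{
	while (leftover) {
		struct rec_header h;

		if (leftover < sizeof(h))
			return -1;		/* truncated header */
		memcpy(&h, p, sizeof(h));
		if (h.totalsize < sizeof(h) || h.totalsize > leftover)
			return -1;		/* record overruns the buffer */
		/* ... consume the record here ... */
		p += h.totalsize;
		leftover -= h.totalsize;
	}
	return 0;
}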
diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
index ec9df6f9cd47..420eb933189c 100644
--- a/arch/x86/kernel/cpu/microcode/intel_early.c
+++ b/arch/x86/kernel/cpu/microcode/intel_early.c
@@ -321,7 +321,11 @@ get_matching_model_microcode(int cpu, unsigned long start,
321 unsigned int mc_saved_count = mc_saved_data->mc_saved_count; 321 unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
322 int i; 322 int i;
323 323
324 while (leftover) { 324 while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
325
326 if (leftover < sizeof(mc_header))
327 break;
328
325 mc_header = (struct microcode_header_intel *)ucode_ptr; 329 mc_header = (struct microcode_header_intel *)ucode_ptr;
326 330
327 mc_size = get_totalsize(mc_header); 331 mc_size = get_totalsize(mc_header);
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 000d4199b03e..31e2d5bf3e38 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -982,6 +982,9 @@ ENTRY(xen_hypervisor_callback)
982ENTRY(xen_do_upcall) 982ENTRY(xen_do_upcall)
9831: mov %esp, %eax 9831: mov %esp, %eax
984 call xen_evtchn_do_upcall 984 call xen_evtchn_do_upcall
985#ifndef CONFIG_PREEMPT
986 call xen_maybe_preempt_hcall
987#endif
985 jmp ret_from_intr 988 jmp ret_from_intr
986 CFI_ENDPROC 989 CFI_ENDPROC
987ENDPROC(xen_hypervisor_callback) 990ENDPROC(xen_hypervisor_callback)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index db13655c3a2a..10074ad9ebf8 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1208,6 +1208,9 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
1208 popq %rsp 1208 popq %rsp
1209 CFI_DEF_CFA_REGISTER rsp 1209 CFI_DEF_CFA_REGISTER rsp
1210 decl PER_CPU_VAR(irq_count) 1210 decl PER_CPU_VAR(irq_count)
1211#ifndef CONFIG_PREEMPT
1212 call xen_maybe_preempt_hcall
1213#endif
1211 jmp error_exit 1214 jmp error_exit
1212 CFI_ENDPROC 1215 CFI_ENDPROC
1213END(xen_do_hypervisor_callback) 1216END(xen_do_hypervisor_callback)
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 705ef8d48e2d..67b1cbe0093a 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -302,6 +302,9 @@ int check_irq_vectors_for_cpu_disable(void)
302 irq = __this_cpu_read(vector_irq[vector]); 302 irq = __this_cpu_read(vector_irq[vector]);
303 if (irq >= 0) { 303 if (irq >= 0) {
304 desc = irq_to_desc(irq); 304 desc = irq_to_desc(irq);
305 if (!desc)
306 continue;
307
305 data = irq_desc_get_irq_data(desc); 308 data = irq_desc_get_irq_data(desc);
306 cpumask_copy(&affinity_new, data->affinity); 309 cpumask_copy(&affinity_new, data->affinity);
307 cpu_clear(this_cpu, affinity_new); 310 cpu_clear(this_cpu, affinity_new);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 98f654d466e5..4e3d5a9621fe 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -84,7 +84,7 @@ static volatile u32 twobyte_is_boostable[256 / 32] = {
84 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ 84 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
85 /* ---------------------------------------------- */ 85 /* ---------------------------------------------- */
86 W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */ 86 W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
87 W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */ 87 W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
88 W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */ 88 W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
89 W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */ 89 W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
90 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */ 90 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
@@ -223,27 +223,48 @@ static unsigned long
223__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) 223__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
224{ 224{
225 struct kprobe *kp; 225 struct kprobe *kp;
226 unsigned long faddr;
226 227
227 kp = get_kprobe((void *)addr); 228 kp = get_kprobe((void *)addr);
228 /* There is no probe, return original address */ 229 faddr = ftrace_location(addr);
229 if (!kp) 230 /*
231 * Addresses inside the ftrace location are refused by
232 * arch_check_ftrace_location(). Something went terribly wrong
233 * if such an address is checked here.
234 */
235 if (WARN_ON(faddr && faddr != addr))
236 return 0UL;
237 /*
238 * Use the current code if it is not modified by Kprobe
239 * and it cannot be modified by ftrace.
240 */
241 if (!kp && !faddr)
230 return addr; 242 return addr;
231 243
232 /* 244 /*
233 * Basically, kp->ainsn.insn has an original instruction. 245 * Basically, kp->ainsn.insn has an original instruction.
234 * However, RIP-relative instruction can not do single-stepping 246 * However, RIP-relative instruction can not do single-stepping
235 * at different place, __copy_instruction() tweaks the displacement of 247 * at different place, __copy_instruction() tweaks the displacement of
236 * that instruction. In that case, we can't recover the instruction 248 * that instruction. In that case, we can't recover the instruction
237 * from the kp->ainsn.insn. 249 * from the kp->ainsn.insn.
238 * 250 *
 239 * On the other hand, kp->opcode has a copy of the first byte of 251 * On the other hand, in case of a normal Kprobe, kp->opcode has a copy
240 * the probed instruction, which is overwritten by int3. And 252 * of the first byte of the probed instruction, which is overwritten
241 * the instruction at kp->addr is not modified by kprobes except 253 * by int3. And the instruction at kp->addr is not modified by kprobes
242 * for the first byte, we can recover the original instruction 254 * except for the first byte, we can recover the original instruction
243 * from it and kp->opcode. 255 * from it and kp->opcode.
256 *
257 * In case of Kprobes using ftrace, we do not have a copy of
258 * the original instruction. In fact, the ftrace location might
259 * be modified at anytime and even could be in an inconsistent state.
260 * Fortunately, we know that the original code is the ideal 5-byte
261 * long NOP.
244 */ 262 */
245 memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); 263 memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
246 buf[0] = kp->opcode; 264 if (faddr)
265 memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
266 else
267 buf[0] = kp->opcode;
247 return (unsigned long)buf; 268 return (unsigned long)buf;
248} 269}
249 270
@@ -251,6 +272,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
251 * Recover the probed instruction at addr for further analysis. 272 * Recover the probed instruction at addr for further analysis.
252 * Caller must lock kprobes by kprobe_mutex, or disable preemption 273 * Caller must lock kprobes by kprobe_mutex, or disable preemption
 253 * to prevent the referenced kprobes from being released. 274 * to prevent the referenced kprobes from being released.
 275 * Returns zero if the instruction cannot be recovered.
254 */ 276 */
255unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) 277unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
256{ 278{
@@ -285,6 +307,8 @@ static int can_probe(unsigned long paddr)
285 * normally used, we just go through if there is no kprobe. 307 * normally used, we just go through if there is no kprobe.
286 */ 308 */
287 __addr = recover_probed_instruction(buf, addr); 309 __addr = recover_probed_instruction(buf, addr);
310 if (!__addr)
311 return 0;
288 kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE); 312 kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
289 insn_get_length(&insn); 313 insn_get_length(&insn);
290 314
@@ -333,6 +357,8 @@ int __copy_instruction(u8 *dest, u8 *src)
333 unsigned long recovered_insn = 357 unsigned long recovered_insn =
334 recover_probed_instruction(buf, (unsigned long)src); 358 recover_probed_instruction(buf, (unsigned long)src);
335 359
360 if (!recovered_insn)
361 return 0;
336 kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); 362 kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
337 insn_get_length(&insn); 363 insn_get_length(&insn);
338 /* Another subsystem puts a breakpoint, failed to recover */ 364 /* Another subsystem puts a breakpoint, failed to recover */
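
Summarizing the new recovery policy in __recover_probed_insn(): an ftrace-managed site is known to have originally been the ideal 5-byte NOP, an ordinary kprobe keeps its displaced first byte in kp->opcode, and anything else is already original text. A userspace model of that decision (NOP bytes assumed to be the x86-64 ideal NOP5):

#include <string.h>

#define MAX_INSN	16
static const unsigned char ideal_nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

static void recover_insn(unsigned char *buf, const unsigned char *live,
			 int is_ftrace_site, unsigned char saved_opcode)
{
	memcpy(buf, live, MAX_INSN);
	if (is_ftrace_site)
		memcpy(buf, ideal_nop5, sizeof(ideal_nop5)); /* site was a NOP */
	else
		buf[0] = saved_opcode;	/* undo the int3 byte */
}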
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 0dd8d089c315..7b3b9d15c47a 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -259,6 +259,8 @@ static int can_optimize(unsigned long paddr)
259 */ 259 */
260 return 0; 260 return 0;
261 recovered_insn = recover_probed_instruction(buf, addr); 261 recovered_insn = recover_probed_instruction(buf, addr);
262 if (!recovered_insn)
263 return 0;
262 kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); 264 kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
263 insn_get_length(&insn); 265 insn_get_length(&insn);
264 /* Another subsystem puts a breakpoint */ 266 /* Another subsystem puts a breakpoint */
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 94f643484300..e354cc6446ab 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -609,7 +609,7 @@ static inline void check_zero(void)
609 u8 ret; 609 u8 ret;
610 u8 old; 610 u8 old;
611 611
612 old = ACCESS_ONCE(zero_stats); 612 old = READ_ONCE(zero_stats);
613 if (unlikely(old)) { 613 if (unlikely(old)) {
614 ret = cmpxchg(&zero_stats, old, 0); 614 ret = cmpxchg(&zero_stats, old, 0);
615 /* This ensures only one fellow resets the stat */ 615 /* This ensures only one fellow resets the stat */
@@ -727,6 +727,7 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
727 int cpu; 727 int cpu;
728 u64 start; 728 u64 start;
729 unsigned long flags; 729 unsigned long flags;
730 __ticket_t head;
730 731
731 if (in_nmi()) 732 if (in_nmi())
732 return; 733 return;
@@ -768,11 +769,15 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
768 */ 769 */
769 __ticket_enter_slowpath(lock); 770 __ticket_enter_slowpath(lock);
770 771
772 /* make sure enter_slowpath, which is atomic does not cross the read */
773 smp_mb__after_atomic();
774
771 /* 775 /*
772 * check again make sure it didn't become free while 776 * check again make sure it didn't become free while
773 * we weren't looking. 777 * we weren't looking.
774 */ 778 */
775 if (ACCESS_ONCE(lock->tickets.head) == want) { 779 head = READ_ONCE(lock->tickets.head);
780 if (__tickets_equal(head, want)) {
776 add_stats(TAKEN_SLOW_PICKUP, 1); 781 add_stats(TAKEN_SLOW_PICKUP, 1);
777 goto out; 782 goto out;
778 } 783 }
@@ -803,8 +808,8 @@ static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
803 add_stats(RELEASED_SLOW, 1); 808 add_stats(RELEASED_SLOW, 1);
804 for_each_cpu(cpu, &waiting_cpus) { 809 for_each_cpu(cpu, &waiting_cpus) {
805 const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu); 810 const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
806 if (ACCESS_ONCE(w->lock) == lock && 811 if (READ_ONCE(w->lock) == lock &&
807 ACCESS_ONCE(w->want) == ticket) { 812 READ_ONCE(w->want) == ticket) {
808 add_stats(RELEASED_SLOW_KICKED, 1); 813 add_stats(RELEASED_SLOW_KICKED, 1);
809 kvm_kick_cpu(cpu); 814 kvm_kick_cpu(cpu);
810 break; 815 break;
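
The smp_mb__after_atomic() added above pins the ordering between the slowpath-flag store and the re-read of head; without it, the waiter and the unlocker can each miss the other. A rough model using GCC atomics in place of the kernel primitives:

#define TICKET_SLOWPATH_FLAG	1u

static int recheck_after_flag(unsigned char *head, unsigned char want)
{
	/* __ticket_enter_slowpath(): atomic RMW setting the flag */
	__atomic_fetch_or(head, TICKET_SLOWPATH_FLAG, __ATOMIC_RELAXED);

	/* smp_mb__after_atomic(): full fence, so the load below cannot
	 * be hoisted above the RMW */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	/* did the lock become free while we set the flag?  Compare
	 * ignoring the flag, as __tickets_equal() does. */
	return !((__atomic_load_n(head, __ATOMIC_RELAXED) ^ want)
		 & ~TICKET_SLOWPATH_FLAG);
}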
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index d1ac80b72c72..9bbb9b35c144 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -47,21 +47,13 @@ do { \
47 47
48#ifdef CONFIG_RANDOMIZE_BASE 48#ifdef CONFIG_RANDOMIZE_BASE
49static unsigned long module_load_offset; 49static unsigned long module_load_offset;
50static int randomize_modules = 1;
51 50
52/* Mutex protects the module_load_offset. */ 51/* Mutex protects the module_load_offset. */
53static DEFINE_MUTEX(module_kaslr_mutex); 52static DEFINE_MUTEX(module_kaslr_mutex);
54 53
55static int __init parse_nokaslr(char *p)
56{
57 randomize_modules = 0;
58 return 0;
59}
60early_param("nokaslr", parse_nokaslr);
61
62static unsigned long int get_module_load_offset(void) 54static unsigned long int get_module_load_offset(void)
63{ 55{
64 if (randomize_modules) { 56 if (kaslr_enabled) {
65 mutex_lock(&module_kaslr_mutex); 57 mutex_lock(&module_kaslr_mutex);
66 /* 58 /*
67 * Calculate the module_load_offset the first time this 59 * Calculate the module_load_offset the first time this
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 0a2421cca01f..98dc9317286e 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -122,6 +122,8 @@
122unsigned long max_low_pfn_mapped; 122unsigned long max_low_pfn_mapped;
123unsigned long max_pfn_mapped; 123unsigned long max_pfn_mapped;
124 124
125bool __read_mostly kaslr_enabled = false;
126
125#ifdef CONFIG_DMI 127#ifdef CONFIG_DMI
126RESERVE_BRK(dmi_alloc, 65536); 128RESERVE_BRK(dmi_alloc, 65536);
127#endif 129#endif
@@ -425,6 +427,11 @@ static void __init reserve_initrd(void)
425} 427}
426#endif /* CONFIG_BLK_DEV_INITRD */ 428#endif /* CONFIG_BLK_DEV_INITRD */
427 429
430static void __init parse_kaslr_setup(u64 pa_data, u32 data_len)
431{
432 kaslr_enabled = (bool)(pa_data + sizeof(struct setup_data));
433}
434
428static void __init parse_setup_data(void) 435static void __init parse_setup_data(void)
429{ 436{
430 struct setup_data *data; 437 struct setup_data *data;
@@ -450,6 +457,9 @@ static void __init parse_setup_data(void)
450 case SETUP_EFI: 457 case SETUP_EFI:
451 parse_efi_setup(pa_data, data_len); 458 parse_efi_setup(pa_data, data_len);
452 break; 459 break;
460 case SETUP_KASLR:
461 parse_kaslr_setup(pa_data, data_len);
462 break;
453 default: 463 default:
454 break; 464 break;
455 } 465 }
@@ -832,10 +842,14 @@ static void __init trim_low_memory_range(void)
832static int 842static int
833dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) 843dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
834{ 844{
835 pr_emerg("Kernel Offset: 0x%lx from 0x%lx " 845 if (kaslr_enabled)
836 "(relocation range: 0x%lx-0x%lx)\n", 846 pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
837 (unsigned long)&_text - __START_KERNEL, __START_KERNEL, 847 (unsigned long)&_text - __START_KERNEL,
838 __START_KERNEL_map, MODULES_VADDR-1); 848 __START_KERNEL,
849 __START_KERNEL_map,
850 MODULES_VADDR-1);
851 else
852 pr_emerg("Kernel Offset: disabled\n");
839 853
840 return 0; 854 return 0;
841} 855}
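
As written, parse_kaslr_setup() casts pa_data plus the header size to bool, so it records the presence of the SETUP_KASLR node rather than the one-byte payload a boot loader stores there. A hedged sketch of reading the payload itself, reusing the early_memremap() pattern that parse_setup_data() already follows (sketch only, not part of this merge):

static void __init parse_kaslr_setup(u64 pa_data, u32 data_len)
{
	struct setup_data *data;

	data = early_memremap(pa_data, sizeof(*data) + sizeof(u8));
	kaslr_enabled = !!data->data[0];	/* the stored flag byte */
	early_memunmap(data, sizeof(*data) + sizeof(u8));
}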
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 8b96a947021f..81f8adb0679e 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -66,27 +66,54 @@
66 * Good-instruction tables for 32-bit apps. This is non-const and volatile 66 * Good-instruction tables for 32-bit apps. This is non-const and volatile
67 * to keep gcc from statically optimizing it out, as variable_test_bit makes 67 * to keep gcc from statically optimizing it out, as variable_test_bit makes
68 * some versions of gcc to think only *(unsigned long*) is used. 68 * some versions of gcc to think only *(unsigned long*) is used.
69 *
70 * Opcodes we'll probably never support:
71 * 6c-6f - ins,outs. SEGVs if used in userspace
72 * e4-e7 - in,out imm. SEGVs if used in userspace
73 * ec-ef - in,out acc. SEGVs if used in userspace
74 * cc - int3. SIGTRAP if used in userspace
75 * ce - into. Not used in userspace - no kernel support to make it useful. SEGVs
76 * (why we support bound (62) then? it's similar, and similarly unused...)
77 * f1 - int1. SIGTRAP if used in userspace
78 * f4 - hlt. SEGVs if used in userspace
79 * fa - cli. SEGVs if used in userspace
80 * fb - sti. SEGVs if used in userspace
81 *
82 * Opcodes which need some work to be supported:
83 * 07,17,1f - pop es/ss/ds
84 * Normally not used in userspace, but would execute if used.
85 * Can cause GP or stack exception if tries to load wrong segment descriptor.
86 * We hesitate to run them under single step since kernel's handling
87 * of userspace single-stepping (TF flag) is fragile.
88 * We can easily refuse to support push es/cs/ss/ds (06/0e/16/1e)
89 * on the same grounds that they are never used.
90 * cd - int N.
91 * Used by userspace for "int 80" syscall entry. (Other "int N"
92 * cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
93 * Not supported since kernel's handling of userspace single-stepping
94 * (TF flag) is fragile.
95 * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
69 */ 96 */
70#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) 97#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
71static volatile u32 good_insns_32[256 / 32] = { 98static volatile u32 good_insns_32[256 / 32] = {
72 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ 99 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
73 /* ---------------------------------------------- */ 100 /* ---------------------------------------------- */
74 W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 00 */ 101 W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
75 W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */ 102 W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
76 W(0x20, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* 20 */ 103 W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
77 W(0x30, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) , /* 30 */ 104 W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
78 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */ 105 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
79 W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */ 106 W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
80 W(0x60, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */ 107 W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
81 W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */ 108 W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
82 W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */ 109 W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
83 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */ 110 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
84 W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */ 111 W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
85 W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */ 112 W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
86 W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */ 113 W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
87 W(0xd0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */ 114 W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
88 W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */ 115 W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
89 W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1) /* f0 */ 116 W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1) /* f0 */
90 /* ---------------------------------------------- */ 117 /* ---------------------------------------------- */
91 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ 118 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
92}; 119};
@@ -94,27 +121,61 @@ static volatile u32 good_insns_32[256 / 32] = {
94#define good_insns_32 NULL 121#define good_insns_32 NULL
95#endif 122#endif
96 123
97/* Good-instruction tables for 64-bit apps */ 124/* Good-instruction tables for 64-bit apps.
125 *
126 * Genuinely invalid opcodes:
127 * 06,07 - formerly push/pop es
128 * 0e - formerly push cs
129 * 16,17 - formerly push/pop ss
130 * 1e,1f - formerly push/pop ds
131 * 27,2f,37,3f - formerly daa/das/aaa/aas
132 * 60,61 - formerly pusha/popa
133 * 62 - formerly bound. EVEX prefix for AVX512 (not yet supported)
134 * 82 - formerly redundant encoding of Group1
135 * 9a - formerly call seg:ofs
136 * ce - formerly into
137 * d4,d5 - formerly aam/aad
138 * d6 - formerly undocumented salc
139 * ea - formerly jmp seg:ofs
140 *
141 * Opcodes we'll probably never support:
142 * 6c-6f - ins,outs. SEGVs if used in userspace
143 * e4-e7 - in,out imm. SEGVs if used in userspace
144 * ec-ef - in,out acc. SEGVs if used in userspace
145 * cc - int3. SIGTRAP if used in userspace
146 * f1 - int1. SIGTRAP if used in userspace
147 * f4 - hlt. SEGVs if used in userspace
148 * fa - cli. SEGVs if used in userspace
149 * fb - sti. SEGVs if used in userspace
150 *
151 * Opcodes which need some work to be supported:
152 * cd - int N.
153 * Used by userspace for "int 80" syscall entry. (Other "int N"
154 * cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
155 * Not supported since kernel's handling of userspace single-stepping
156 * (TF flag) is fragile.
157 * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
158 */
98#if defined(CONFIG_X86_64) 159#if defined(CONFIG_X86_64)
99static volatile u32 good_insns_64[256 / 32] = { 160static volatile u32 good_insns_64[256 / 32] = {
100 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ 161 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
101 /* ---------------------------------------------- */ 162 /* ---------------------------------------------- */
102 W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 00 */ 163 W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* 00 */
103 W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */ 164 W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
104 W(0x20, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 20 */ 165 W(0x20, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 20 */
105 W(0x30, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 30 */ 166 W(0x30, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 30 */
106 W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */ 167 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
107 W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */ 168 W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
108 W(0x60, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */ 169 W(0x60, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
109 W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */ 170 W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
110 W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */ 171 W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
111 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */ 172 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1) , /* 90 */
112 W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */ 173 W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
113 W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */ 174 W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
114 W(0xc0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */ 175 W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
115 W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */ 176 W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
116 W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */ 177 W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0) | /* e0 */
117 W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1) /* f0 */ 178 W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1) /* f0 */
118 /* ---------------------------------------------- */ 179 /* ---------------------------------------------- */
119 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ 180 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
120}; 181};
@@ -122,49 +183,55 @@ static volatile u32 good_insns_64[256 / 32] = {
122#define good_insns_64 NULL 183#define good_insns_64 NULL
123#endif 184#endif
124 185
125/* Using this for both 64-bit and 32-bit apps */ 186/* Using this for both 64-bit and 32-bit apps.
187 * Opcodes we don't support:
188 * 0f 00 - SLDT/STR/LLDT/LTR/VERR/VERW/-/- group. System insns
189 * 0f 01 - SGDT/SIDT/LGDT/LIDT/SMSW/-/LMSW/INVLPG group.
190 * Also encodes tons of other system insns if mod=11.
191 * Some are in fact non-system: xend, xtest, rdtscp, maybe more
192 * 0f 05 - syscall
193 * 0f 06 - clts (CPL0 insn)
194 * 0f 07 - sysret
195 * 0f 08 - invd (CPL0 insn)
196 * 0f 09 - wbinvd (CPL0 insn)
197 * 0f 0b - ud2
198 * 0f 30 - wrmsr (CPL0 insn) (then why rdmsr is allowed, it's also CPL0 insn?)
199 * 0f 34 - sysenter
200 * 0f 35 - sysexit
201 * 0f 37 - getsec
202 * 0f 78 - vmread (Intel VMX. CPL0 insn)
203 * 0f 79 - vmwrite (Intel VMX. CPL0 insn)
204 * Note: with prefixes, these two opcodes are
205 * extrq/insertq/AVX512 convert vector ops.
206 * 0f ae - group15: [f]xsave,[f]xrstor,[v]{ld,st}mxcsr,clflush[opt],
207 * {rd,wr}{fs,gs}base,{s,l,m}fence.
208 * Why? They are all user-executable.
209 */
126static volatile u32 good_2byte_insns[256 / 32] = { 210static volatile u32 good_2byte_insns[256 / 32] = {
127 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ 211 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
128 /* ---------------------------------------------- */ 212 /* ---------------------------------------------- */
129 W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */ 213 W(0x00, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1) | /* 00 */
130 W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */ 214 W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
131 W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */ 215 W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
132 W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */ 216 W(0x30, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
133 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */ 217 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
134 W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */ 218 W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
135 W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */ 219 W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
136 W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */ 220 W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* 70 */
137 W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */ 221 W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
138 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */ 222 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
139 W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */ 223 W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
140 W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */ 224 W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
141 W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */ 225 W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
142 W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */ 226 W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
143 W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */ 227 W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
144 W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0) /* f0 */ 228 W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* f0 */
145 /* ---------------------------------------------- */ 229 /* ---------------------------------------------- */
146 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ 230 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
147}; 231};
148#undef W 232#undef W
149 233
150/* 234/*
151 * opcodes we'll probably never support:
152 *
153 * 6c-6d, e4-e5, ec-ed - in
154 * 6e-6f, e6-e7, ee-ef - out
155 * cc, cd - int3, int
156 * cf - iret
157 * d6 - illegal instruction
158 * f1 - int1/icebp
159 * f4 - hlt
160 * fa, fb - cli, sti
161 * 0f - lar, lsl, syscall, clts, sysret, sysenter, sysexit, invd, wbinvd, ud2
162 *
163 * invalid opcodes in 64-bit mode:
164 *
165 * 06, 0e, 16, 1e, 27, 2f, 37, 3f, 60-62, 82, c4-c5, d4-d5
166 * 63 - we support this opcode in x86_64 but not in i386.
167 *
168 * opcodes we may need to refine support for: 235 * opcodes we may need to refine support for:
169 * 236 *
170 * 0f - 2-byte instructions: For many of these instructions, the validity 237 * 0f - 2-byte instructions: For many of these instructions, the validity
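
The tables above are 256-entry bitmaps, built 16 bits per W() row and consulted with test_bit() at probe time. A standalone model of the lookup (packing assumed LSB-first within each 32-bit word):

#include <stdint.h>
#include <stdio.h>

static int opcode_ok(const uint32_t table[8], uint8_t opcode)
{
	return (table[opcode >> 5] >> (opcode & 31)) & 1;
}

int main(void)
{
	uint32_t good[8] = { 0 };

	good[0x90 >> 5] |= 1u << (0x90 & 31);	/* mark nop (0x90) good */
	printf("%d %d\n", opcode_ok(good, 0x90), opcode_ok(good, 0xcc));
	return 0;
}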
diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig
index 4a0890f815c4..08f41caada45 100644
--- a/arch/x86/lguest/Kconfig
+++ b/arch/x86/lguest/Kconfig
@@ -1,6 +1,6 @@
1config LGUEST_GUEST 1config LGUEST_GUEST
2 bool "Lguest guest support" 2 bool "Lguest guest support"
3 depends on X86_32 && PARAVIRT 3 depends on X86_32 && PARAVIRT && PCI
4 select TTY 4 select TTY
5 select VIRTUALIZATION 5 select VIRTUALIZATION
6 select VIRTIO 6 select VIRTIO
@@ -8,7 +8,7 @@ config LGUEST_GUEST
8 help 8 help
9 Lguest is a tiny in-kernel hypervisor. Selecting this will 9 Lguest is a tiny in-kernel hypervisor. Selecting this will
10 allow your kernel to boot under lguest. This option will increase 10 allow your kernel to boot under lguest. This option will increase
11 your kernel size by about 6k. If in doubt, say N. 11 your kernel size by about 10k. If in doubt, say N.
12 12
13 If you say Y here, make sure you say Y (or M) to the virtio block 13 If you say Y here, make sure you say Y (or M) to the virtio block
14 and net drivers which lguest needs. 14 and net drivers which lguest needs.
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index c1c1544b8485..ac4453d8520e 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -56,6 +56,9 @@
56#include <linux/virtio_console.h> 56#include <linux/virtio_console.h>
57#include <linux/pm.h> 57#include <linux/pm.h>
58#include <linux/export.h> 58#include <linux/export.h>
59#include <linux/pci.h>
60#include <linux/virtio_pci.h>
61#include <asm/acpi.h>
59#include <asm/apic.h> 62#include <asm/apic.h>
60#include <asm/lguest.h> 63#include <asm/lguest.h>
61#include <asm/paravirt.h> 64#include <asm/paravirt.h>
@@ -71,6 +74,8 @@
71#include <asm/stackprotector.h> 74#include <asm/stackprotector.h>
72#include <asm/reboot.h> /* for struct machine_ops */ 75#include <asm/reboot.h> /* for struct machine_ops */
73#include <asm/kvm_para.h> 76#include <asm/kvm_para.h>
77#include <asm/pci_x86.h>
78#include <asm/pci-direct.h>
74 79
75/*G:010 80/*G:010
76 * Welcome to the Guest! 81 * Welcome to the Guest!
@@ -831,6 +836,24 @@ static struct irq_chip lguest_irq_controller = {
831 .irq_unmask = enable_lguest_irq, 836 .irq_unmask = enable_lguest_irq,
832}; 837};
833 838
839static int lguest_enable_irq(struct pci_dev *dev)
840{
841 u8 line = 0;
842
843 /* We literally use the PCI interrupt line as the irq number. */
844 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &line);
845 irq_set_chip_and_handler_name(line, &lguest_irq_controller,
846 handle_level_irq, "level");
847 dev->irq = line;
848 return 0;
849}
850
851/* We don't do hotplug PCI, so this shouldn't be called. */
852static void lguest_disable_irq(struct pci_dev *dev)
853{
854 WARN_ON(1);
855}
856
834/* 857/*
835 * This sets up the Interrupt Descriptor Table (IDT) entry for each hardware 858 * This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
836 * interrupt (except 128, which is used for system calls), and then tells the 859 * interrupt (except 128, which is used for system calls), and then tells the
@@ -1181,25 +1204,136 @@ static __init char *lguest_memory_setup(void)
1181 return "LGUEST"; 1204 return "LGUEST";
1182} 1205}
1183 1206
1207/* Offset within PCI config space of BAR access capability. */
1208static int console_cfg_offset = 0;
1209static int console_access_cap;
1210
1211/* Set up so that we access offset 'off' within bar0 (on bus 0, device 1, function 0) */
1212static void set_cfg_window(u32 cfg_offset, u32 off)
1213{
1214 write_pci_config_byte(0, 1, 0,
1215 cfg_offset + offsetof(struct virtio_pci_cap, bar),
1216 0);
1217 write_pci_config(0, 1, 0,
1218 cfg_offset + offsetof(struct virtio_pci_cap, length),
1219 4);
1220 write_pci_config(0, 1, 0,
1221 cfg_offset + offsetof(struct virtio_pci_cap, offset),
1222 off);
1223}
1224
1225static void write_bar_via_cfg(u32 cfg_offset, u32 off, u32 val)
1226{
1227 /*
1228 * We could set this up once, then leave it; nothing else in the
1229 * kernel should touch these registers. But if it went wrong, that
1230 * would be a horrible bug to find.
1231 */
1232 set_cfg_window(cfg_offset, off);
1233 write_pci_config(0, 1, 0,
1234 cfg_offset + sizeof(struct virtio_pci_cap), val);
1235}
1236
1237static void probe_pci_console(void)
1238{
1239 u8 cap, common_cap = 0, device_cap = 0;
1240 /* Offset within BAR0 */
1241 u32 device_offset;
1242 u32 device_len;
1243
1244 /* Avoid recursive printk into here. */
1245 console_cfg_offset = -1;
1246
1247 if (!early_pci_allowed()) {
1248 printk(KERN_ERR "lguest: early PCI access not allowed!\n");
1249 return;
1250 }
1251
1252 /* We expect a console PCI device at BUS0, slot 1. */
1253 if (read_pci_config(0, 1, 0, 0) != 0x10431AF4) {
1254 printk(KERN_ERR "lguest: PCI device is %#x!\n",
1255 read_pci_config(0, 1, 0, 0));
1256 return;
1257 }
1258
1259 /* Find the capabilities we need (must be in bar0) */
1260 cap = read_pci_config_byte(0, 1, 0, PCI_CAPABILITY_LIST);
1261 while (cap) {
1262 u8 vndr = read_pci_config_byte(0, 1, 0, cap);
1263 if (vndr == PCI_CAP_ID_VNDR) {
1264 u8 type, bar;
1265 u32 offset, length;
1266
1267 type = read_pci_config_byte(0, 1, 0,
1268 cap + offsetof(struct virtio_pci_cap, cfg_type));
1269 bar = read_pci_config_byte(0, 1, 0,
1270 cap + offsetof(struct virtio_pci_cap, bar));
1271 offset = read_pci_config(0, 1, 0,
1272 cap + offsetof(struct virtio_pci_cap, offset));
1273 length = read_pci_config(0, 1, 0,
1274 cap + offsetof(struct virtio_pci_cap, length));
1275
1276 switch (type) {
1277 case VIRTIO_PCI_CAP_DEVICE_CFG:
1278 if (bar == 0) {
1279 device_cap = cap;
1280 device_offset = offset;
1281 device_len = length;
1282 }
1283 break;
1284 case VIRTIO_PCI_CAP_PCI_CFG:
1285 console_access_cap = cap;
1286 break;
1287 }
1288 }
1289 cap = read_pci_config_byte(0, 1, 0, cap + PCI_CAP_LIST_NEXT);
1290 }
1291 if (!device_cap || !console_access_cap) {
1292 printk(KERN_ERR "lguest: No caps (%u/%u/%u) in console!\n",
1293 common_cap, device_cap, console_access_cap);
1294 return;
1295 }
1296
1297 /*
1298 * Note that we can't check features until we've set the DRIVER
1299 * status bit. We don't want to do that until we have a real driver,
1300 * so we just check that the device-specific config has room for
1301 * emerg_wr. If it doesn't support VIRTIO_CONSOLE_F_EMERG_WRITE
1302 * it should ignore the access.
1303 */
1304 if (device_len < (offsetof(struct virtio_console_config, emerg_wr)
1305 + sizeof(u32))) {
1306 printk(KERN_ERR "lguest: console missing emerg_wr field\n");
1307 return;
1308 }
1309
1310 console_cfg_offset = device_offset;
1311 printk(KERN_INFO "lguest: Console via virtio-pci emerg_wr\n");
1312}
1313
1184/* 1314/*
1185 * We will eventually use the virtio console device to produce console output, 1315 * We will eventually use the virtio console device to produce console output,
1186 * but before that is set up we use LHCALL_NOTIFY on normal memory to produce 1316 * but before that is set up we use the virtio PCI console's backdoor mmio
1187 * console output. 1317 * access and the "emergency" write facility (which is legal even before the
1318 * device is configured).
1188 */ 1319 */
1189static __init int early_put_chars(u32 vtermno, const char *buf, int count) 1320static __init int early_put_chars(u32 vtermno, const char *buf, int count)
1190{ 1321{
1191 char scratch[17]; 1322 /* If we couldn't find PCI console, forget it. */
1192 unsigned int len = count; 1323 if (console_cfg_offset < 0)
1324 return count;
1193 1325
1194 /* We use a nul-terminated string, so we make a copy. Icky, huh? */ 1326 if (unlikely(!console_cfg_offset)) {
1195 if (len > sizeof(scratch) - 1) 1327 probe_pci_console();
1196 len = sizeof(scratch) - 1; 1328 if (console_cfg_offset < 0)
1197 scratch[len] = '\0'; 1329 return count;
1198 memcpy(scratch, buf, len); 1330 }
1199 hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0, 0);
1200 1331
1201 /* This routine returns the number of bytes actually written. */ 1332 write_bar_via_cfg(console_access_cap,
1202 return len; 1333 console_cfg_offset
1334 + offsetof(struct virtio_console_config, emerg_wr),
1335 buf[0]);
1336 return 1;
1203} 1337}
1204 1338
1205/* 1339/*
@@ -1400,14 +1534,6 @@ __init void lguest_init(void)
1400 atomic_notifier_chain_register(&panic_notifier_list, &paniced); 1534 atomic_notifier_chain_register(&panic_notifier_list, &paniced);
1401 1535
1402 /* 1536 /*
1403 * The IDE code spends about 3 seconds probing for disks: if we reserve
1404 * all the I/O ports up front it can't get them and so doesn't probe.
1405 * Other device drivers are similar (but less severe). This cuts the
1406 * kernel boot time on my machine from 4.1 seconds to 0.45 seconds.
1407 */
1408 paravirt_disable_iospace();
1409
1410 /*
1411 * This is messy CPU setup stuff which the native boot code does before 1537 * This is messy CPU setup stuff which the native boot code does before
1412 * start_kernel, so we have to do, too: 1538 * start_kernel, so we have to do, too:
1413 */ 1539 */
@@ -1436,6 +1562,13 @@ __init void lguest_init(void)
1436 /* Register our very early console. */ 1562 /* Register our very early console. */
1437 virtio_cons_early_init(early_put_chars); 1563 virtio_cons_early_init(early_put_chars);
1438 1564
1565 /* Don't let ACPI try to control our PCI interrupts. */
1566 disable_acpi();
1567
1568 /* We control them ourselves, by overriding these two hooks. */
1569 pcibios_enable_irq = lguest_enable_irq;
1570 pcibios_disable_irq = lguest_disable_irq;
1571
1439 /* 1572 /*
1440 * Last of all, we set the power management poweroff hook to point to 1573 * Last of all, we set the power management poweroff hook to point to
1441 * the Guest routine to power off, and the reboot hook to our restart 1574 * the Guest routine to power off, and the reboot hook to our restart
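
probe_pci_console() above parses the virtio 1.0 vendor capability straight out of PCI config space; for reference, the structure whose fields it reads via offsetof() (struct virtio_pci_cap, as defined in the uapi virtio_pci.h of this era) is laid out like this:

struct virtio_pci_cap {
	__u8	cap_vndr;	/* PCI_CAP_ID_VNDR */
	__u8	cap_next;	/* offset of the next capability */
	__u8	cap_len;	/* length of this capability */
	__u8	cfg_type;	/* VIRTIO_PCI_CAP_*_CFG identifier */
	__u8	bar;		/* which BAR holds the window */
	__u8	padding[3];
	__le32	offset;		/* offset of the window within that BAR */
	__le32	length;		/* length of the window */
};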
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 553c094b9cd7..a110efca6d06 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -238,6 +238,31 @@ static void __init_refok adjust_range_page_size_mask(struct map_range *mr,
238 } 238 }
239} 239}
240 240
241static const char *page_size_string(struct map_range *mr)
242{
243 static const char str_1g[] = "1G";
244 static const char str_2m[] = "2M";
245 static const char str_4m[] = "4M";
246 static const char str_4k[] = "4k";
247
248 if (mr->page_size_mask & (1<<PG_LEVEL_1G))
249 return str_1g;
250 /*
251 * 32-bit without PAE has a 4M large page size.
252 * PG_LEVEL_2M is misnamed, but we can at least
253 * print out the right size in the string.
254 */
255 if (IS_ENABLED(CONFIG_X86_32) &&
256 !IS_ENABLED(CONFIG_X86_PAE) &&
257 mr->page_size_mask & (1<<PG_LEVEL_2M))
258 return str_4m;
259
260 if (mr->page_size_mask & (1<<PG_LEVEL_2M))
261 return str_2m;
262
263 return str_4k;
264}
265
241static int __meminit split_mem_range(struct map_range *mr, int nr_range, 266static int __meminit split_mem_range(struct map_range *mr, int nr_range,
242 unsigned long start, 267 unsigned long start,
243 unsigned long end) 268 unsigned long end)
@@ -333,8 +358,7 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
333 for (i = 0; i < nr_range; i++) 358 for (i = 0; i < nr_range; i++)
334 printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n", 359 printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
335 mr[i].start, mr[i].end - 1, 360 mr[i].start, mr[i].end - 1,
336 (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":( 361 page_size_string(&mr[i]));
337 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
338 362
339 return nr_range; 363 return nr_range;
340} 364}
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 919b91205cd4..df4552bd239e 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -35,12 +35,12 @@ struct va_alignment __read_mostly va_align = {
35 .flags = -1, 35 .flags = -1,
36}; 36};
37 37
38static unsigned int stack_maxrandom_size(void) 38static unsigned long stack_maxrandom_size(void)
39{ 39{
40 unsigned int max = 0; 40 unsigned long max = 0;
41 if ((current->flags & PF_RANDOMIZE) && 41 if ((current->flags & PF_RANDOMIZE) &&
42 !(current->personality & ADDR_NO_RANDOMIZE)) { 42 !(current->personality & ADDR_NO_RANDOMIZE)) {
43 max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT; 43 max = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT;
44 } 44 }
45 45
46 return max; 46 return max;
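The widening from unsigned int to unsigned long matters on x86-64, where STACK_RND_MASK shifted by PAGE_SHIFT needs 34 bits. A small userspace sketch (mask and shift values assumed from the x86-64 defaults) showing the truncation the patch fixes:

```c
#include <stdio.h>

int main(void)
{
	const unsigned int mask = 0x3fffff;	/* STACK_RND_MASK, x86-64 (assumed) */
	const int page_shift = 12;		/* PAGE_SHIFT */

	/* Old arithmetic: everything is 32 bits wide, so the shift wraps. */
	unsigned int bad = ((-1U) & mask) << page_shift;
	/* New arithmetic: done in 64 bits, all 34 significant bits survive. */
	unsigned long good = ((-1UL) & (unsigned long)mask) << page_shift;

	printf("32-bit: %#x\n", bad);	/* 0xfffff000 - wrapped */
	printf("64-bit: %#lx\n", good);	/* 0x3fffff000 - roughly 16 GiB */
	return 0;
}
```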
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile
index 85afde1fa3e5..a62e0be3a2f1 100644
--- a/arch/x86/platform/Makefile
+++ b/arch/x86/platform/Makefile
@@ -5,6 +5,7 @@ obj-y += geode/
5obj-y += goldfish/ 5obj-y += goldfish/
6obj-y += iris/ 6obj-y += iris/
7obj-y += intel-mid/ 7obj-y += intel-mid/
8obj-y += intel-quark/
8obj-y += olpc/ 9obj-y += olpc/
9obj-y += scx200/ 10obj-y += scx200/
10obj-y += sfi/ 11obj-y += sfi/
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
index 5fcda7272550..86d0f9e08dd9 100644
--- a/arch/x86/platform/efi/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
@@ -91,167 +91,6 @@ ENTRY(efi_call)
91 ret 91 ret
92ENDPROC(efi_call) 92ENDPROC(efi_call)
93 93
94#ifdef CONFIG_EFI_MIXED
95
96/*
97 * We run this function from the 1:1 mapping.
98 *
99 * This function must be invoked with a 1:1 mapped stack.
100 */
101ENTRY(__efi64_thunk)
102 movl %ds, %eax
103 push %rax
104 movl %es, %eax
105 push %rax
106 movl %ss, %eax
107 push %rax
108
109 subq $32, %rsp
110 movl %esi, 0x0(%rsp)
111 movl %edx, 0x4(%rsp)
112 movl %ecx, 0x8(%rsp)
113 movq %r8, %rsi
114 movl %esi, 0xc(%rsp)
115 movq %r9, %rsi
116 movl %esi, 0x10(%rsp)
117
118 sgdt save_gdt(%rip)
119
120 leaq 1f(%rip), %rbx
121 movq %rbx, func_rt_ptr(%rip)
122
123 /* Switch to gdt with 32-bit segments */
124 movl 64(%rsp), %eax
125 lgdt (%rax)
126
127 leaq efi_enter32(%rip), %rax
128 pushq $__KERNEL_CS
129 pushq %rax
130 lretq
131
1321: addq $32, %rsp
133
134 lgdt save_gdt(%rip)
135
136 pop %rbx
137 movl %ebx, %ss
138 pop %rbx
139 movl %ebx, %es
140 pop %rbx
141 movl %ebx, %ds
142
143 /*
144 * Convert 32-bit status code into 64-bit.
145 */
146 test %rax, %rax
147 jz 1f
148 movl %eax, %ecx
149 andl $0x0fffffff, %ecx
150 andl $0xf0000000, %eax
151 shl $32, %rax
152 or %rcx, %rax
1531:
154 ret
155ENDPROC(__efi64_thunk)
156
157ENTRY(efi_exit32)
158 movq func_rt_ptr(%rip), %rax
159 push %rax
160 mov %rdi, %rax
161 ret
162ENDPROC(efi_exit32)
163
164 .code32
165/*
166 * EFI service pointer must be in %edi.
167 *
168 * The stack should represent the 32-bit calling convention.
169 */
170ENTRY(efi_enter32)
171 movl $__KERNEL_DS, %eax
172 movl %eax, %ds
173 movl %eax, %es
174 movl %eax, %ss
175
176 /* Reload pgtables */
177 movl %cr3, %eax
178 movl %eax, %cr3
179
180 /* Disable paging */
181 movl %cr0, %eax
182 btrl $X86_CR0_PG_BIT, %eax
183 movl %eax, %cr0
184
185 /* Disable long mode via EFER */
186 movl $MSR_EFER, %ecx
187 rdmsr
188 btrl $_EFER_LME, %eax
189 wrmsr
190
191 call *%edi
192
193 /* We must preserve return value */
194 movl %eax, %edi
195
196 /*
197 * Some firmware will return with interrupts enabled. Be sure to
198 * disable them before we switch GDTs.
199 */
200 cli
201
202 movl 68(%esp), %eax
203 movl %eax, 2(%eax)
204 lgdtl (%eax)
205
206 movl %cr4, %eax
207 btsl $(X86_CR4_PAE_BIT), %eax
208 movl %eax, %cr4
209
210 movl %cr3, %eax
211 movl %eax, %cr3
212
213 movl $MSR_EFER, %ecx
214 rdmsr
215 btsl $_EFER_LME, %eax
216 wrmsr
217
218 xorl %eax, %eax
219 lldt %ax
220
221 movl 72(%esp), %eax
222 pushl $__KERNEL_CS
223 pushl %eax
224
225 /* Enable paging */
226 movl %cr0, %eax
227 btsl $X86_CR0_PG_BIT, %eax
228 movl %eax, %cr0
229 lret
230ENDPROC(efi_enter32)
231
232 .data
233 .balign 8
234 .global efi32_boot_gdt
235efi32_boot_gdt: .word 0
236 .quad 0
237
238save_gdt: .word 0
239 .quad 0
240func_rt_ptr: .quad 0
241
242 .global efi_gdt64
243efi_gdt64:
244 .word efi_gdt64_end - efi_gdt64
245 .long 0 /* Filled out by user */
246 .word 0
247 .quad 0x0000000000000000 /* NULL descriptor */
248 .quad 0x00af9a000000ffff /* __KERNEL_CS */
249 .quad 0x00cf92000000ffff /* __KERNEL_DS */
250 .quad 0x0080890000000000 /* TS descriptor */
251 .quad 0x0000000000000000 /* TS continued */
252efi_gdt64_end:
253#endif /* CONFIG_EFI_MIXED */
254
255 .data 94 .data
256ENTRY(efi_scratch) 95ENTRY(efi_scratch)
257 .fill 3,8,0 96 .fill 3,8,0
diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S
index 8806fa73e6e6..ff85d28c50f2 100644
--- a/arch/x86/platform/efi/efi_thunk_64.S
+++ b/arch/x86/platform/efi/efi_thunk_64.S
@@ -1,9 +1,26 @@
1/* 1/*
2 * Copyright (C) 2014 Intel Corporation; author Matt Fleming 2 * Copyright (C) 2014 Intel Corporation; author Matt Fleming
3 *
4 * Support for invoking 32-bit EFI runtime services from a 64-bit
5 * kernel.
6 *
7 * The below thunking functions are only used after ExitBootServices()
8 * has been called. This simplifies things considerably as compared with
9 * the early EFI thunking because we can leave all the kernel state
10 * intact (GDT, IDT, etc) and simply invoke the 32-bit EFI runtime
11 * services from __KERNEL32_CS. This means we can continue to service
12 * interrupts across an EFI mixed mode call.
13 *
14 * We do, however, need to handle the fact that we're running in a full
15 * 64-bit virtual address space. Things like the stack and instruction
16 * addresses need to be accessible by the 32-bit firmware, so we rely on
17 * using the identity mappings in the EFI page table to access the stack
18 * and kernel text (see efi_setup_page_tables()).
3 */ 19 */
4 20
5#include <linux/linkage.h> 21#include <linux/linkage.h>
6#include <asm/page_types.h> 22#include <asm/page_types.h>
23#include <asm/segment.h>
7 24
8 .text 25 .text
9 .code64 26 .code64
@@ -33,14 +50,6 @@ ENTRY(efi64_thunk)
33 leaq efi_exit32(%rip), %rbx 50 leaq efi_exit32(%rip), %rbx
34 subq %rax, %rbx 51 subq %rax, %rbx
35 movl %ebx, 8(%rsp) 52 movl %ebx, 8(%rsp)
36 leaq efi_gdt64(%rip), %rbx
37 subq %rax, %rbx
38 movl %ebx, 2(%ebx)
39 movl %ebx, 4(%rsp)
40 leaq efi_gdt32(%rip), %rbx
41 subq %rax, %rbx
42 movl %ebx, 2(%ebx)
43 movl %ebx, (%rsp)
44 53
45 leaq __efi64_thunk(%rip), %rbx 54 leaq __efi64_thunk(%rip), %rbx
46 subq %rax, %rbx 55 subq %rax, %rbx
@@ -52,14 +61,92 @@ ENTRY(efi64_thunk)
52 retq 61 retq
53ENDPROC(efi64_thunk) 62ENDPROC(efi64_thunk)
54 63
55 .data 64/*
56efi_gdt32: 65 * We run this function from the 1:1 mapping.
57 .word efi_gdt32_end - efi_gdt32 66 *
58 .long 0 /* Filled out above */ 67 * This function must be invoked with a 1:1 mapped stack.
59 .word 0 68 */
60 .quad 0x0000000000000000 /* NULL descriptor */ 69ENTRY(__efi64_thunk)
61 .quad 0x00cf9a000000ffff /* __KERNEL_CS */ 70 movl %ds, %eax
62 .quad 0x00cf93000000ffff /* __KERNEL_DS */ 71 push %rax
63efi_gdt32_end: 72 movl %es, %eax
73 push %rax
74 movl %ss, %eax
75 push %rax
76
77 subq $32, %rsp
78 movl %esi, 0x0(%rsp)
79 movl %edx, 0x4(%rsp)
80 movl %ecx, 0x8(%rsp)
81 movq %r8, %rsi
82 movl %esi, 0xc(%rsp)
83 movq %r9, %rsi
84 movl %esi, 0x10(%rsp)
85
86 leaq 1f(%rip), %rbx
87 movq %rbx, func_rt_ptr(%rip)
88
89 /* Switch to 32-bit descriptor */
90 pushq $__KERNEL32_CS
91 leaq efi_enter32(%rip), %rax
92 pushq %rax
93 lretq
94
951: addq $32, %rsp
96
97 pop %rbx
98 movl %ebx, %ss
99 pop %rbx
100 movl %ebx, %es
101 pop %rbx
102 movl %ebx, %ds
64 103
104 /*
105 * Convert 32-bit status code into 64-bit.
106 */
107 test %rax, %rax
108 jz 1f
109 movl %eax, %ecx
110 andl $0x0fffffff, %ecx
111 andl $0xf0000000, %eax
112 shl $32, %rax
113 or %rcx, %rax
1141:
115 ret
116ENDPROC(__efi64_thunk)
117
118ENTRY(efi_exit32)
119 movq func_rt_ptr(%rip), %rax
120 push %rax
121 mov %rdi, %rax
122 ret
123ENDPROC(efi_exit32)
124
125 .code32
126/*
127 * EFI service pointer must be in %edi.
128 *
129 * The stack should represent the 32-bit calling convention.
130 */
131ENTRY(efi_enter32)
132 movl $__KERNEL_DS, %eax
133 movl %eax, %ds
134 movl %eax, %es
135 movl %eax, %ss
136
137 call *%edi
138
139 /* We must preserve return value */
140 movl %eax, %edi
141
142 movl 72(%esp), %eax
143 pushl $__KERNEL_CS
144 pushl %eax
145
146 lret
147ENDPROC(efi_enter32)
148
149 .data
150 .balign 8
151func_rt_ptr: .quad 0
65efi_saved_sp: .quad 0 152efi_saved_sp: .quad 0
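The status-widening sequence in __efi64_thunk's epilogue keeps the status code in the low 28 bits and moves the top nibble of the 32-bit EFI_STATUS - including the error bit - up to bits 63:60. A standalone C sketch of the same transform (an illustration, not kernel code):

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t efi_status_32_to_64(uint32_t status)
{
	if (status == 0)
		return 0;
	/* Top nibble to bits 63:60, status code stays in the low 28 bits. */
	return ((uint64_t)(status & 0xf0000000) << 32) | (status & 0x0fffffff);
}

int main(void)
{
	/* EFI_INVALID_PARAMETER: 0x80000002 as 32-bit, 0x8000000000000002 as 64-bit. */
	printf("%#llx\n", (unsigned long long)efi_status_32_to_64(0x80000002u));
	return 0;
}
```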
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index 1bbedc4b0f88..3005f0c89f2e 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -130,7 +130,7 @@ static void intel_mid_arch_setup(void)
130 intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip](); 130 intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip]();
131 else { 131 else {
132 intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL](); 132 intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL]();
133 pr_info("ARCH: Uknown SoC, assuming PENWELL!\n"); 133 pr_info("ARCH: Unknown SoC, assuming PENWELL!\n");
134 } 134 }
135 135
136out: 136out:
diff --git a/arch/x86/platform/intel-quark/Makefile b/arch/x86/platform/intel-quark/Makefile
new file mode 100644
index 000000000000..9cc57ed36022
--- /dev/null
+++ b/arch/x86/platform/intel-quark/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_INTEL_IMR) += imr.o
2obj-$(CONFIG_DEBUG_IMR_SELFTEST) += imr_selftest.o
diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c
new file mode 100644
index 000000000000..0ee619f9fcb7
--- /dev/null
+++ b/arch/x86/platform/intel-quark/imr.c
@@ -0,0 +1,661 @@
1/**
2 * imr.c
3 *
4 * Copyright(c) 2013 Intel Corporation.
5 * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
6 *
7 * IMR registers define an isolated region of memory that can
8 * be masked to prohibit certain system agents from accessing memory.
9 * When a device behind a masked port performs an access - snooped or
10 * not - an IMR may optionally prevent that transaction from changing
11 * the state of memory or from getting correct data in response to the
12 * operation.
13 *
14 * Write data will be dropped and reads will return 0xFFFFFFFF; the
15 * system will reset and system BIOS will print out an error message to
16 * inform the user that an IMR has been violated.
17 *
18 * This code is based on the Linux MTRR code and reference code from
19 * Intel's Quark BSP EFI, Linux and grub code.
20 *
21 * See quark-x1000-datasheet.pdf for register definitions.
22 * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/quark-x1000-datasheet.pdf
23 */
24
25#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26
27#include <asm-generic/sections.h>
28#include <asm/cpu_device_id.h>
29#include <asm/imr.h>
30#include <asm/iosf_mbi.h>
31#include <linux/debugfs.h>
32#include <linux/init.h>
33#include <linux/mm.h>
34#include <linux/module.h>
35#include <linux/types.h>
36
37struct imr_device {
38 struct dentry *file;
39 bool init;
40 struct mutex lock;
41 int max_imr;
42 int reg_base;
43};
44
45static struct imr_device imr_dev;
46
47/*
48 * IMR read/write mask control registers.
49 * See quark-x1000-datasheet.pdf sections 12.7.4.5 and 12.7.4.6 for
50 * bit definitions.
51 *
52 * addr_lo
53 * 31 Lock bit
54 * 30:24 Reserved
55 * 23:2 1 KiB aligned lo address
56 * 1:0 Reserved
57 *
58 * addr_hi
59 * 31:24 Reserved
60 * 23:2 1 KiB aligned hi address
61 * 1:0 Reserved
62 */
63#define IMR_LOCK BIT(31)
64
65struct imr_regs {
66 u32 addr_lo;
67 u32 addr_hi;
68 u32 rmask;
69 u32 wmask;
70};
71
72#define IMR_NUM_REGS (sizeof(struct imr_regs)/sizeof(u32))
73#define IMR_SHIFT 8
74#define imr_to_phys(x) ((x) << IMR_SHIFT)
75#define phys_to_imr(x) ((x) >> IMR_SHIFT)
76
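For orientation, the register addressing and address packing implied by these macros work out as below; a userspace sketch with an assumed reg_base (the real value comes from QRK_MBI/QUARK_X1000_IMR_REGBASE):

```c
#include <stdio.h>

#define IMR_NUM_REGS	4	/* addr_lo, addr_hi, rmask, wmask */
#define IMR_SHIFT	8
#define imr_to_phys(x)	((x) << IMR_SHIFT)
#define phys_to_imr(x)	((x) >> IMR_SHIFT)

int main(void)
{
	unsigned int reg_base = 0x40;	/* assumed, for illustration only */
	unsigned int imr_id = 2;

	/* IMR 2's addr_lo register sits 2 * 4 registers past the base. */
	printf("first reg: %#x\n", imr_id * IMR_NUM_REGS + reg_base); /* 0x48 */

	/* A 1 KiB-aligned physical address round-trips through the 8-bit
	 * shift; the register's reserved low bits stay zero. */
	printf("raw for 0x40000: %#x\n", phys_to_imr(0x40000u));      /* 0x400 */
	printf("phys for 0x400:  %#x\n", imr_to_phys(0x400u));        /* 0x40000 */
	return 0;
}
```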
77/**
78 * imr_is_enabled - true if an IMR is enabled, false otherwise.
79 *
80 * Determines if an IMR is enabled based on address range and read/write
81 * mask. An IMR with its address range set to zero and its read/write
82 * access masks set to all is considered disabled. An IMR in any
83 * other state - for example, range zero but without the access-all
84 * masks - is considered enabled. This definition of disabled is how
85 * firmware switches off an IMR and is kept in the kernel for
86 * consistency.
87 *
88 * @imr: pointer to IMR descriptor.
89 * @return: true if the IMR is enabled, false if disabled.
90 */
91static inline int imr_is_enabled(struct imr_regs *imr)
92{
93 return !(imr->rmask == IMR_READ_ACCESS_ALL &&
94 imr->wmask == IMR_WRITE_ACCESS_ALL &&
95 imr_to_phys(imr->addr_lo) == 0 &&
96 imr_to_phys(imr->addr_hi) == 0);
97}
98
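The "disabled" sentinel this predicate tests for can be shown standalone; the access-all mask values below are assumed stand-ins for the definitions in asm/imr.h:

```c
#include <stdio.h>

/* Assumed values for the access-all masks (really from asm/imr.h). */
#define IMR_READ_ACCESS_ALL	0xbfffffff
#define IMR_WRITE_ACCESS_ALL	0xffffffff

struct imr_regs { unsigned int addr_lo, addr_hi, rmask, wmask; };

/* Same predicate as imr_is_enabled() above, minus the shift macros. */
static int is_enabled(const struct imr_regs *imr)
{
	return !(imr->rmask == IMR_READ_ACCESS_ALL &&
		 imr->wmask == IMR_WRITE_ACCESS_ALL &&
		 imr->addr_lo == 0 && imr->addr_hi == 0);
}

int main(void)
{
	struct imr_regs off = { 0, 0, IMR_READ_ACCESS_ALL, IMR_WRITE_ACCESS_ALL };
	struct imr_regs on  = { 0, 0, 0, 0 }; /* range zero, but masks not "all" */

	printf("off: %d, on: %d\n", is_enabled(&off), is_enabled(&on)); /* 0, 1 */
	return 0;
}
```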
99/**
100 * imr_read - read an IMR at a given index.
101 *
102 * Requires caller to hold imr mutex.
103 *
104 * @idev: pointer to imr_device structure.
105 * @imr_id: IMR entry to read.
106 * @imr: IMR structure representing address and access masks.
107 * @return: 0 on success or error code passed from mbi_iosf on failure.
108 */
109static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
110{
111 u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
112 int ret;
113
114 ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
115 reg++, &imr->addr_lo);
116 if (ret)
117 return ret;
118
119 ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
120 reg++, &imr->addr_hi);
121 if (ret)
122 return ret;
123
124 ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
125 reg++, &imr->rmask);
126 if (ret)
127 return ret;
128
129 return iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
130 reg++, &imr->wmask);
131}
132
133/**
134 * imr_write - write an IMR at a given index.
135 *
136 * Requires caller to hold imr mutex.
137 * Note lock bits need to be written independently of address bits.
138 *
139 * @idev: pointer to imr_device structure.
140 * @imr_id: IMR entry to write.
141 * @imr: IMR structure representing address and access masks.
142 * @lock: indicates if the IMR lock bit should be applied.
143 * @return: 0 on success or error code passed from mbi_iosf on failure.
144 */
145static int imr_write(struct imr_device *idev, u32 imr_id,
146 struct imr_regs *imr, bool lock)
147{
148 unsigned long flags;
149 u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
150 int ret;
151
152 local_irq_save(flags);
153
154 ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, reg++,
155 imr->addr_lo);
156 if (ret)
157 goto failed;
158
159 ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
160 reg++, imr->addr_hi);
161 if (ret)
162 goto failed;
163
164 ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
165 reg++, imr->rmask);
166 if (ret)
167 goto failed;
168
169 ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
170 reg++, imr->wmask);
171 if (ret)
172 goto failed;
173
174 /* Lock bit must be set separately to addr_lo address bits. */
175 if (lock) {
176 imr->addr_lo |= IMR_LOCK;
177 ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
178 reg - IMR_NUM_REGS, imr->addr_lo);
179 if (ret)
180 goto failed;
181 }
182
183 local_irq_restore(flags);
184 return 0;
185failed:
186 /*
187 * If writing to the IOSF failed then we're in an unknown state,
188 * likely a very bad state. An IMR in an invalid state will almost
189 * certainly lead to a memory access violation.
190 */
191 local_irq_restore(flags);
192 WARN(ret, "IOSF-MBI write fail range 0x%08x-0x%08x unreliable\n",
193 imr_to_phys(imr->addr_lo), imr_to_phys(imr->addr_hi) + IMR_MASK);
194
195 return ret;
196}
197
198/**
199 * imr_dbgfs_state_show - print state of IMR registers.
200 *
201 * @s: pointer to seq_file for output.
202 * @unused: unused parameter.
203 * @return: 0 on success or error code passed from mbi_iosf on failure.
204 */
205static int imr_dbgfs_state_show(struct seq_file *s, void *unused)
206{
207 phys_addr_t base;
208 phys_addr_t end;
209 int i;
210 struct imr_device *idev = s->private;
211 struct imr_regs imr;
212 size_t size;
213 int ret = -ENODEV;
214
215 mutex_lock(&idev->lock);
216
217 for (i = 0; i < idev->max_imr; i++) {
218
219 ret = imr_read(idev, i, &imr);
220 if (ret)
221 break;
222
223 /*
224 * Remember to add IMR_ALIGN bytes to the size: addr_hi
225 * inherently covers the IMR_ALIGN extra bytes hidden in the
226 * masked-away lower ten bits.
227 */
228 if (imr_is_enabled(&imr)) {
229 base = imr_to_phys(imr.addr_lo);
230 end = imr_to_phys(imr.addr_hi) + IMR_MASK;
231 } else {
232 base = 0;
233 end = 0;
234 }
235 size = end - base;
236 seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx "
237 "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i,
238 &base, &end, size, imr.rmask, imr.wmask,
239 imr_is_enabled(&imr) ? "enabled " : "disabled",
240 imr.addr_lo & IMR_LOCK ? "locked" : "unlocked");
241 }
242
243 mutex_unlock(&idev->lock);
244 return ret;
245}
246
247/**
248 * imr_state_open - debugfs open callback.
249 *
250 * @inode: pointer to struct inode.
251 * @file: pointer to struct file.
252 * @return: result of single open.
253 */
254static int imr_state_open(struct inode *inode, struct file *file)
255{
256 return single_open(file, imr_dbgfs_state_show, inode->i_private);
257}
258
259static const struct file_operations imr_state_ops = {
260 .open = imr_state_open,
261 .read = seq_read,
262 .llseek = seq_lseek,
263 .release = single_release,
264};
265
266/**
267 * imr_debugfs_register - register debugfs hooks.
268 *
269 * @idev: pointer to imr_device structure.
270 * @return: 0 on success - errno on failure.
271 */
272static int imr_debugfs_register(struct imr_device *idev)
273{
274 idev->file = debugfs_create_file("imr_state", S_IFREG | S_IRUGO, NULL,
275 idev, &imr_state_ops);
276 return PTR_ERR_OR_ZERO(idev->file);
277}
278
279/**
280 * imr_debugfs_unregister - unregister debugfs hooks.
281 *
282 * @idev: pointer to imr_device structure.
283 * @return: none.
284 */
285static void imr_debugfs_unregister(struct imr_device *idev)
286{
287 debugfs_remove(idev->file);
288}
289
290/**
291 * imr_check_params - check passed address range IMR alignment and non-zero size
292 *
293 * @base: base address of intended IMR.
294 * @size: size of intended IMR.
295 * @return: zero on valid range -EINVAL on unaligned base/size.
296 */
297static int imr_check_params(phys_addr_t base, size_t size)
298{
299 if ((base & IMR_MASK) || (size & IMR_MASK)) {
300 pr_err("base %pa size 0x%08zx must align to 1KiB\n",
301 &base, size);
302 return -EINVAL;
303 }
304 if (size == 0)
305 return -EINVAL;
306
307 return 0;
308}
309
310/**
311 * imr_raw_size - account for the IMR_ALIGN bytes that addr_hi appends.
312 *
313 * IMR addr_hi has a built in offset of plus IMR_ALIGN (0x400) bytes from the
314 * value in the register. We need to subtract IMR_ALIGN bytes from input sizes
315 * as a result.
316 *
317 * @size: input size bytes.
318 * @return: reduced size.
319 */
320static inline size_t imr_raw_size(size_t size)
321{
322 return size - IMR_ALIGN;
323}
324
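A worked example of that accounting, under the assumption IMR_ALIGN is 0x400 (the 1 KiB IMR granularity):

```c
#include <stdio.h>

#define IMR_ALIGN	0x400	/* 1 KiB IMR granularity (assumed) */

int main(void)
{
	/* To protect [0x40000, 0x41000) the caller passes size 0x1000. */
	unsigned int base = 0x40000, size = 0x1000;

	/* imr_raw_size(): addr_hi implicitly covers IMR_ALIGN extra bytes,
	 * so the value programmed is size - IMR_ALIGN. */
	unsigned int raw_size = size - IMR_ALIGN;	/* 0xc00 */
	unsigned int end = base + raw_size;		/* 0x40c00 -> addr_hi */

	/* Hardware protects through end + IMR_ALIGN - 1 = 0x40fff. */
	printf("addr_hi covers up to %#x\n", end + IMR_ALIGN - 1);
	return 0;
}
```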
325/**
326 * imr_address_overlap - detects an address overlap.
327 *
328 * @addr: address to check against an existing IMR.
329 * @imr: imr being checked.
330 * @return: true for overlap false for no overlap.
331 */
332static inline int imr_address_overlap(phys_addr_t addr, struct imr_regs *imr)
333{
334 return addr >= imr_to_phys(imr->addr_lo) && addr <= imr_to_phys(imr->addr_hi);
335}
336
337/**
338 * imr_add_range - add an Isolated Memory Region.
339 *
340 * @base: physical base address of region aligned to 1KiB.
341 * @size: physical size of region in bytes must be aligned to 1KiB.
342 * @read_mask: read access mask.
343 * @write_mask: write access mask.
344 * @lock: indicates whether or not to permanently lock this region.
345 * @return: zero on success or negative value indicating error.
346 */
347int imr_add_range(phys_addr_t base, size_t size,
348 unsigned int rmask, unsigned int wmask, bool lock)
349{
350 phys_addr_t end;
351 unsigned int i;
352 struct imr_device *idev = &imr_dev;
353 struct imr_regs imr;
354 size_t raw_size;
355 int reg;
356 int ret;
357
358 if (WARN_ONCE(idev->init == false, "driver not initialized"))
359 return -ENODEV;
360
361 ret = imr_check_params(base, size);
362 if (ret)
363 return ret;
364
365 /* Tweak the size value. */
366 raw_size = imr_raw_size(size);
367 end = base + raw_size;
368
369 /*
370 * Check for reserved IMR value common to firmware, kernel and grub
371 * indicating a disabled IMR.
372 */
373 imr.addr_lo = phys_to_imr(base);
374 imr.addr_hi = phys_to_imr(end);
375 imr.rmask = rmask;
376 imr.wmask = wmask;
377 if (!imr_is_enabled(&imr))
378 return -ENOTSUPP;
379
380 mutex_lock(&idev->lock);
381
382 /*
383 * Find a free IMR while checking for an existing overlapping range.
384 * Note there's no restriction in silicon to prevent IMR overlaps.
385 * For the sake of simplicity and ease in defining/debugging an IMR
386 * memory map we exclude IMR overlaps.
387 */
388 reg = -1;
389 for (i = 0; i < idev->max_imr; i++) {
390 ret = imr_read(idev, i, &imr);
391 if (ret)
392 goto failed;
393
394 /* Find overlap @ base or end of requested range. */
395 ret = -EINVAL;
396 if (imr_is_enabled(&imr)) {
397 if (imr_address_overlap(base, &imr))
398 goto failed;
399 if (imr_address_overlap(end, &imr))
400 goto failed;
401 } else {
402 reg = i;
403 }
404 }
405
406 /* Error out if we have no free IMR entries. */
407 if (reg == -1) {
408 ret = -ENOMEM;
409 goto failed;
410 }
411
412 pr_debug("add %d phys %pa-%pa size %zx mask 0x%08x wmask 0x%08x\n",
413 reg, &base, &end, raw_size, rmask, wmask);
414
415 /* Enable IMR at specified range and access mask. */
416 imr.addr_lo = phys_to_imr(base);
417 imr.addr_hi = phys_to_imr(end);
418 imr.rmask = rmask;
419 imr.wmask = wmask;
420
421 ret = imr_write(idev, reg, &imr, lock);
422 if (ret < 0) {
423 /*
424 * In the highly unlikely event iosf_mbi_write failed
425 * attempt to rollback the IMR setup skipping the trapping
426 * of further IOSF write failures.
427 */
428 imr.addr_lo = 0;
429 imr.addr_hi = 0;
430 imr.rmask = IMR_READ_ACCESS_ALL;
431 imr.wmask = IMR_WRITE_ACCESS_ALL;
432 imr_write(idev, reg, &imr, false);
433 }
434failed:
435 mutex_unlock(&idev->lock);
436 return ret;
437}
438EXPORT_SYMBOL_GPL(imr_add_range);
439
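A hypothetical caller of the exported API might look like the sketch below: protect a small 1 KiB-aligned buffer so only the CPU may touch it, then tear the region down again. This mirrors how the selftest further down exercises the interface; it is an illustration, not code from this patch.

```c
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/imr.h>

static int example_protect(phys_addr_t buf_phys)
{
	int ret;

	ret = imr_add_range(buf_phys, 2 * IMR_ALIGN, IMR_CPU, IMR_CPU, false);
	if (ret)
		return ret;	/* -EINVAL, -ENOMEM, -ENODEV, ... */

	/* ... use the buffer while it is CPU-only ... */

	return imr_remove_range(buf_phys, 2 * IMR_ALIGN);
}
```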
440/**
441 * __imr_remove_range - delete an Isolated Memory Region.
442 *
443 * This function allows you to delete an IMR by its index specified by reg or
444 * by address range specified by base and size respectively. If you specify an
445 * index on its own the base and size parameters are ignored.
446 * imr_remove_range(0, base, size); delete IMR at index 0 base/size ignored.
447 * imr_remove_range(-1, base, size); delete IMR from base to base+size.
448 *
449 * @reg: imr index to remove.
450 * @base: physical base address of region aligned to 1 KiB.
451 * @size: physical size of region in bytes aligned to 1 KiB.
452 * @return: -EINVAL on invalid range or out of range id
453 * -ENODEV if reg is valid but no IMR exists or is locked
454 * 0 on success.
455 */
456static int __imr_remove_range(int reg, phys_addr_t base, size_t size)
457{
458 phys_addr_t end;
459 bool found = false;
460 unsigned int i;
461 struct imr_device *idev = &imr_dev;
462 struct imr_regs imr;
463 size_t raw_size;
464 int ret = 0;
465
466 if (WARN_ONCE(idev->init == false, "driver not initialized"))
467 return -ENODEV;
468
469 /*
470 * Validate address range if deleting by address, else we are
471 * deleting by index where base and size will be ignored.
472 */
473 if (reg == -1) {
474 ret = imr_check_params(base, size);
475 if (ret)
476 return ret;
477 }
478
479 /* Tweak the size value. */
480 raw_size = imr_raw_size(size);
481 end = base + raw_size;
482
483 mutex_lock(&idev->lock);
484
485 if (reg >= 0) {
486 /* If a specific IMR is given try to use it. */
487 ret = imr_read(idev, reg, &imr);
488 if (ret)
489 goto failed;
490
491 if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) {
492 ret = -ENODEV;
493 goto failed;
494 }
495 found = true;
496 } else {
497 /* Search for match based on address range. */
498 for (i = 0; i < idev->max_imr; i++) {
499 ret = imr_read(idev, i, &imr);
500 if (ret)
501 goto failed;
502
503 if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK)
504 continue;
505
506 if ((imr_to_phys(imr.addr_lo) == base) &&
507 (imr_to_phys(imr.addr_hi) == end)) {
508 found = true;
509 reg = i;
510 break;
511 }
512 }
513 }
514
515 if (!found) {
516 ret = -ENODEV;
517 goto failed;
518 }
519
520 pr_debug("remove %d phys %pa-%pa size %zx\n", reg, &base, &end, raw_size);
521
522 /* Tear down the IMR. */
523 imr.addr_lo = 0;
524 imr.addr_hi = 0;
525 imr.rmask = IMR_READ_ACCESS_ALL;
526 imr.wmask = IMR_WRITE_ACCESS_ALL;
527
528 ret = imr_write(idev, reg, &imr, false);
529
530failed:
531 mutex_unlock(&idev->lock);
532 return ret;
533}
534
535/**
536 * imr_remove_range - delete an Isolated Memory Region by address
537 *
538 * This function allows you to delete an IMR by an address range specified
539 * by base and size respectively.
540 * imr_remove_range(base, size); delete IMR from base to base+size.
541 *
542 * @base: physical base address of region aligned to 1 KiB.
543 * @size: physical size of region in bytes aligned to 1 KiB.
544 * @return: -EINVAL on invalid range or out of range id
545 * -ENODEV if reg is valid but no IMR exists or is locked
546 * 0 on success.
547 */
548int imr_remove_range(phys_addr_t base, size_t size)
549{
550 return __imr_remove_range(-1, base, size);
551}
552EXPORT_SYMBOL_GPL(imr_remove_range);
553
554/**
555 * imr_clear - delete an Isolated Memory Region by index
556 *
557 * This function allows you to delete an IMR by its index rather than by
558 * address range. Useful for initial sanitization of the IMR
559 * address map.
560 * imr_clear(reg); delete IMR at index reg, address range ignored.
561 *
562 * @reg: imr index to remove.
563 * @return: -EINVAL on invalid range or out of range id
564 * -ENODEV if reg is valid but no IMR exists or is locked
565 * 0 on success.
566 */
567static inline int imr_clear(int reg)
568{
569 return __imr_remove_range(reg, 0, 0);
570}
571
572/**
573 * imr_fixup_memmap - Tear down IMRs used during bootup.
574 *
575 * BIOS and Grub both set up IMRs around the compressed kernel and initrd
576 * memory that need to be removed before the kernel hands out one of the
577 * IMR encased addresses to a downstream DMA agent such as the SD or
578 * Ethernet controller. IMRs on Galileo are set up to immediately reset the
579 * system on violation. As a result, if you're running a root filesystem
580 * from SD you'll need the boot-time IMRs torn down or you'll see seemingly
581 * random resets when using your filesystem.
582 *
583 * @idev: pointer to imr_device structure.
584 * @return: none.
585 */
586static void __init imr_fixup_memmap(struct imr_device *idev)
587{
588 phys_addr_t base = virt_to_phys(&_text);
589 size_t size = virt_to_phys(&__end_rodata) - base;
590 int i;
591 int ret;
592
593 /* Tear down all existing unlocked IMRs. */
594 for (i = 0; i < idev->max_imr; i++)
595 imr_clear(i);
596
597 /*
598 * Setup a locked IMR around the physical extent of the kernel
599 * from the beginning of the .text section to the end of the
600 * .rodata section as one physically contiguous block.
601 */
602 ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
603 if (ret < 0) {
604 pr_err("unable to setup IMR for kernel: (%p - %p)\n",
605 &_text, &__end_rodata);
606 } else {
607 pr_info("protecting kernel .text - .rodata: %zu KiB (%p - %p)\n",
608 size / 1024, &_text, &__end_rodata);
609 }
610
611}
612
613static const struct x86_cpu_id imr_ids[] __initconst = {
614 { X86_VENDOR_INTEL, 5, 9 }, /* Intel Quark SoC X1000. */
615 {}
616};
617MODULE_DEVICE_TABLE(x86cpu, imr_ids);
618
619/**
620 * imr_init - entry point for IMR driver.
621 *
622 * return: -ENODEV if there is no IMR support, 0 if good to go.
623 */
624static int __init imr_init(void)
625{
626 struct imr_device *idev = &imr_dev;
627 int ret;
628
629 if (!x86_match_cpu(imr_ids) || !iosf_mbi_available())
630 return -ENODEV;
631
632 idev->max_imr = QUARK_X1000_IMR_MAX;
633 idev->reg_base = QUARK_X1000_IMR_REGBASE;
634 idev->init = true;
635
636 mutex_init(&idev->lock);
637 ret = imr_debugfs_register(idev);
638 if (ret != 0)
639 pr_warn("debugfs register failed!\n");
640 imr_fixup_memmap(idev);
641 return 0;
642}
643
644/**
645 * imr_exit - exit point for IMR code.
646 *
647 * Deregisters debugfs, leave IMR state as-is.
648 *
649 * return: none.
650 */
651static void __exit imr_exit(void)
652{
653 imr_debugfs_unregister(&imr_dev);
654}
655
656module_init(imr_init);
657module_exit(imr_exit);
658
659MODULE_AUTHOR("Bryan O'Donoghue <pure.logic@nexus-software.ie>");
660MODULE_DESCRIPTION("Intel Isolated Memory Region driver");
661MODULE_LICENSE("Dual BSD/GPL");
diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
new file mode 100644
index 000000000000..c9a0838890e2
--- /dev/null
+++ b/arch/x86/platform/intel-quark/imr_selftest.c
@@ -0,0 +1,129 @@
1/**
2 * imr_selftest.c
3 *
4 * Copyright(c) 2013 Intel Corporation.
5 * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
6 *
7 * IMR self test. The purpose of this module is to run a set of tests on the
8 * IMR API to validate its sanity. We check for overlapping, reserved
9 * addresses and setup/teardown sanity.
10 *
11 */
12
13#include <asm-generic/sections.h>
14#include <asm/imr.h>
15#include <linux/init.h>
16#include <linux/mm.h>
17#include <linux/module.h>
18#include <linux/types.h>
19
20#define SELFTEST KBUILD_MODNAME ": "
21/**
22 * imr_self_test_result - Print result string for self test.
23 *
24 * @res: result code - true if test passed false otherwise.
25 * @fmt: format string.
26 * ... variadic argument list.
27 */
28static void __init imr_self_test_result(int res, const char *fmt, ...)
29{
30 va_list vlist;
31
32 /* Print pass/fail. */
33 if (res)
34 pr_info(SELFTEST "pass ");
35 else
36 pr_info(SELFTEST "fail ");
37
38 /* Print variable string. */
39 va_start(vlist, fmt);
40 vprintk(fmt, vlist);
41 va_end(vlist);
42
43 /* Optional warning. */
44 WARN(res == 0, "test failed");
45}
46#undef SELFTEST
47
48/**
49 * imr_self_test
50 *
51 * Run some simple tests to verify overlap handling,
52 * zero sized allocations and 1 KiB sized areas.
53 *
54 */
55static void __init imr_self_test(void)
56{
57 phys_addr_t base = virt_to_phys(&_text);
58 size_t size = virt_to_phys(&__end_rodata) - base;
59 const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n";
60 int ret;
61
62 /* Test zero zero. */
63 ret = imr_add_range(0, 0, 0, 0, false);
64 imr_self_test_result(ret < 0, "zero sized IMR\n");
65
66 /* Test exact overlap. */
67 ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
68 imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
69
70 /* Test overlap with base inside of existing. */
71 base += size - IMR_ALIGN;
72 ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
73 imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
74
75 /* Test overlap with end inside of existing. */
76 base -= size + IMR_ALIGN * 2;
77 ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
78 imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
79
80 /* Test that a 1 KiB IMR @ zero with read/write all will bomb out. */
81 ret = imr_add_range(0, IMR_ALIGN, IMR_READ_ACCESS_ALL,
82 IMR_WRITE_ACCESS_ALL, false);
83 imr_self_test_result(ret < 0, "1KiB IMR @ 0x00000000 - access-all\n");
84
85 /* Test that a 1 KiB IMR @ zero with CPU only will work. */
86 ret = imr_add_range(0, IMR_ALIGN, IMR_CPU, IMR_CPU, false);
87 imr_self_test_result(ret >= 0, "1KiB IMR @ 0x00000000 - cpu-access\n");
88 if (ret >= 0) {
89 ret = imr_remove_range(0, IMR_ALIGN);
90 imr_self_test_result(ret == 0, "teardown - cpu-access\n");
91 }
92
93 /* Test 2 KiB works. */
94 size = IMR_ALIGN * 2;
95 ret = imr_add_range(0, size, IMR_READ_ACCESS_ALL,
96 IMR_WRITE_ACCESS_ALL, false);
97 imr_self_test_result(ret >= 0, "2KiB IMR @ 0x00000000\n");
98 if (ret >= 0) {
99 ret = imr_remove_range(0, size);
100 imr_self_test_result(ret == 0, "teardown 2KiB\n");
101 }
102}
103
104/**
105 * imr_self_test_init - entry point for IMR driver.
106 *
107 * return: 0 on completion of the self test.
108 */
109static int __init imr_self_test_init(void)
110{
111 imr_self_test();
112 return 0;
113}
114
115/**
116 * imr_self_test_exit - exit point for IMR code.
117 *
118 * return: none.
119 */
120static void __exit imr_self_test_exit(void)
121{
122}
123
124module_init(imr_self_test_init);
125module_exit(imr_self_test_exit);
126
127MODULE_AUTHOR("Bryan O'Donoghue <pure.logic@nexus-software.ie>");
128MODULE_DESCRIPTION("Intel Isolated Memory Region self-test driver");
129MODULE_LICENSE("Dual BSD/GPL");
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index bd8b8459c3d0..5240f563076d 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1070,6 +1070,23 @@ static inline void xen_write_cr8(unsigned long val)
1070 BUG_ON(val); 1070 BUG_ON(val);
1071} 1071}
1072#endif 1072#endif
1073
1074static u64 xen_read_msr_safe(unsigned int msr, int *err)
1075{
1076 u64 val;
1077
1078 val = native_read_msr_safe(msr, err);
1079 switch (msr) {
1080 case MSR_IA32_APICBASE:
1081#ifdef CONFIG_X86_X2APIC
1082 if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
1083#endif
1084 val &= ~X2APIC_ENABLE;
1085 break;
1086 }
1087 return val;
1088}
1089
1073static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) 1090static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
1074{ 1091{
1075 int ret; 1092 int ret;
@@ -1240,7 +1257,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
1240 1257
1241 .wbinvd = native_wbinvd, 1258 .wbinvd = native_wbinvd,
1242 1259
1243 .read_msr = native_read_msr_safe, 1260 .read_msr = xen_read_msr_safe,
1244 .write_msr = xen_write_msr_safe, 1261 .write_msr = xen_write_msr_safe,
1245 1262
1246 .read_tsc = native_read_tsc, 1263 .read_tsc = native_read_tsc,
@@ -1741,6 +1758,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
1741#ifdef CONFIG_X86_32 1758#ifdef CONFIG_X86_32
1742 i386_start_kernel(); 1759 i386_start_kernel();
1743#else 1760#else
1761 cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */
1744 x86_64_start_reservations((char *)__pa_symbol(&boot_params)); 1762 x86_64_start_reservations((char *)__pa_symbol(&boot_params));
1745#endif 1763#endif
1746} 1764}
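The MSR filtering added above hides the x2APIC enable bit from a guest whose CPUID does not advertise x2APIC, so the guest never believes x2APIC is on. A standalone sketch of that masking (X2APIC_ENABLE is bit 10 of IA32_APICBASE; the CPUID flag here is an assumed parameter):

```c
#include <stdint.h>
#include <stdio.h>

#define X2APIC_ENABLE	(1u << 10)	/* bit 10 of IA32_APICBASE */

static uint64_t filter_apicbase(uint64_t val, int cpu_has_x2apic)
{
	if (!cpu_has_x2apic)
		val &= ~(uint64_t)X2APIC_ENABLE;	/* hide the enable bit */
	return val;
}

int main(void)
{
	uint64_t raw = 0xfee00000u | X2APIC_ENABLE;

	printf("%#llx\n", (unsigned long long)filter_apicbase(raw, 0));
	return 0;
}
```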
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 23b45eb9a89c..956374c1edbc 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -41,7 +41,7 @@ static u8 zero_stats;
41static inline void check_zero(void) 41static inline void check_zero(void)
42{ 42{
43 u8 ret; 43 u8 ret;
44 u8 old = ACCESS_ONCE(zero_stats); 44 u8 old = READ_ONCE(zero_stats);
45 if (unlikely(old)) { 45 if (unlikely(old)) {
46 ret = cmpxchg(&zero_stats, old, 0); 46 ret = cmpxchg(&zero_stats, old, 0);
47 /* This ensures only one fellow resets the stat */ 47 /* This ensures only one fellow resets the stat */
@@ -112,6 +112,7 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
112 struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting); 112 struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
113 int cpu = smp_processor_id(); 113 int cpu = smp_processor_id();
114 u64 start; 114 u64 start;
115 __ticket_t head;
115 unsigned long flags; 116 unsigned long flags;
116 117
117 /* If kicker interrupts not initialized yet, just spin */ 118 /* If kicker interrupts not initialized yet, just spin */
@@ -159,11 +160,15 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
159 */ 160 */
160 __ticket_enter_slowpath(lock); 161 __ticket_enter_slowpath(lock);
161 162
163 /* make sure enter_slowpath, which is atomic, does not cross the read */
164 smp_mb__after_atomic();
165
162 /* 166 /*
163 * check again make sure it didn't become free while 167 * check again make sure it didn't become free while
164 * we weren't looking 168 * we weren't looking
165 */ 169 */
166 if (ACCESS_ONCE(lock->tickets.head) == want) { 170 head = READ_ONCE(lock->tickets.head);
171 if (__tickets_equal(head, want)) {
167 add_stats(TAKEN_SLOW_PICKUP, 1); 172 add_stats(TAKEN_SLOW_PICKUP, 1);
168 goto out; 173 goto out;
169 } 174 }
@@ -204,8 +209,8 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
204 const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu); 209 const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);
205 210
206 /* Make sure we read lock before want */ 211 /* Make sure we read lock before want */
207 if (ACCESS_ONCE(w->lock) == lock && 212 if (READ_ONCE(w->lock) == lock &&
208 ACCESS_ONCE(w->want) == next) { 213 READ_ONCE(w->want) == next) {
209 add_stats(RELEASED_SLOW_KICKED, 1); 214 add_stats(RELEASED_SLOW_KICKED, 1);
210 xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); 215 xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
211 break; 216 break;
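The ACCESS_ONCE() to READ_ONCE() conversions here reflect that ACCESS_ONCE()'s volatile cast of the lvalue only works for scalar types, while READ_ONCE() also copes with aggregates. For scalars the effect is the classic volatile-cast idiom, sketched below in userspace (a simplified illustration, not the kernel's definition):

```c
#include <stdio.h>

/* Force exactly one load the compiler may not merge, re-read, or tear. */
#define MY_READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))

static unsigned char zero_stats_demo = 1;

int main(void)
{
	unsigned char old = MY_READ_ONCE(zero_stats_demo);

	printf("%u\n", old);
	return 0;
}
```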
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 876eb380aa26..147b26ed9c91 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -182,13 +182,13 @@
182#define get_fs() (current->thread.current_ds) 182#define get_fs() (current->thread.current_ds)
183#define set_fs(val) (current->thread.current_ds = (val)) 183#define set_fs(val) (current->thread.current_ds = (val))
184 184
185#define segment_eq(a,b) ((a).seg == (b).seg) 185#define segment_eq(a, b) ((a).seg == (b).seg)
186 186
187#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) 187#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
188#define __user_ok(addr,size) \ 188#define __user_ok(addr, size) \
189 (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) 189 (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
190#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size))) 190#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
191#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size)) 191#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))
192 192
193/* 193/*
194 * These are the main single-value transfer routines. They 194 * These are the main single-value transfer routines. They
@@ -204,8 +204,8 @@
204 * (a) re-use the arguments for side effects (sizeof is ok) 204 * (a) re-use the arguments for side effects (sizeof is ok)
205 * (b) require any knowledge of processes at this stage 205 * (b) require any knowledge of processes at this stage
206 */ 206 */
207#define put_user(x,ptr) __put_user_check((x),(ptr),sizeof(*(ptr))) 207#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
208#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr))) 208#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))
209 209
210/* 210/*
211 * The "__xxx" versions of the user access functions are versions that 211 * The "__xxx" versions of the user access functions are versions that
@@ -213,39 +213,39 @@
213 * with a separate "access_ok()" call (this is used when we do multiple 213 * with a separate "access_ok()" call (this is used when we do multiple
214 * accesses to the same area of user memory). 214 * accesses to the same area of user memory).
215 */ 215 */
216#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr))) 216#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
217#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr))) 217#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
218 218
219 219
220extern long __put_user_bad(void); 220extern long __put_user_bad(void);
221 221
222#define __put_user_nocheck(x,ptr,size) \ 222#define __put_user_nocheck(x, ptr, size) \
223({ \ 223({ \
224 long __pu_err; \ 224 long __pu_err; \
225 __put_user_size((x),(ptr),(size),__pu_err); \ 225 __put_user_size((x), (ptr), (size), __pu_err); \
226 __pu_err; \ 226 __pu_err; \
227}) 227})
228 228
229#define __put_user_check(x,ptr,size) \ 229#define __put_user_check(x, ptr, size) \
230({ \ 230({ \
231 long __pu_err = -EFAULT; \ 231 long __pu_err = -EFAULT; \
232 __typeof__(*(ptr)) *__pu_addr = (ptr); \ 232 __typeof__(*(ptr)) *__pu_addr = (ptr); \
233 if (access_ok(VERIFY_WRITE,__pu_addr,size)) \ 233 if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
234 __put_user_size((x),__pu_addr,(size),__pu_err); \ 234 __put_user_size((x), __pu_addr, (size), __pu_err); \
235 __pu_err; \ 235 __pu_err; \
236}) 236})
237 237
238#define __put_user_size(x,ptr,size,retval) \ 238#define __put_user_size(x, ptr, size, retval) \
239do { \ 239do { \
240 int __cb; \ 240 int __cb; \
241 retval = 0; \ 241 retval = 0; \
242 switch (size) { \ 242 switch (size) { \
243 case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb); break; \ 243 case 1: __put_user_asm(x, ptr, retval, 1, "s8i", __cb); break; \
244 case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \ 244 case 2: __put_user_asm(x, ptr, retval, 2, "s16i", __cb); break; \
245 case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \ 245 case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \
246 case 8: { \ 246 case 8: { \
247 __typeof__(*ptr) __v64 = x; \ 247 __typeof__(*ptr) __v64 = x; \
248 retval = __copy_to_user(ptr,&__v64,8); \ 248 retval = __copy_to_user(ptr, &__v64, 8); \
249 break; \ 249 break; \
250 } \ 250 } \
251 default: __put_user_bad(); \ 251 default: __put_user_bad(); \
@@ -316,35 +316,35 @@ __asm__ __volatile__( \
316 :"=r" (err), "=r" (cb) \ 316 :"=r" (err), "=r" (cb) \
317 :"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err)) 317 :"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))
318 318
319#define __get_user_nocheck(x,ptr,size) \ 319#define __get_user_nocheck(x, ptr, size) \
320({ \ 320({ \
321 long __gu_err, __gu_val; \ 321 long __gu_err, __gu_val; \
322 __get_user_size(__gu_val,(ptr),(size),__gu_err); \ 322 __get_user_size(__gu_val, (ptr), (size), __gu_err); \
323 (x) = (__force __typeof__(*(ptr)))__gu_val; \ 323 (x) = (__force __typeof__(*(ptr)))__gu_val; \
324 __gu_err; \ 324 __gu_err; \
325}) 325})
326 326
327#define __get_user_check(x,ptr,size) \ 327#define __get_user_check(x, ptr, size) \
328({ \ 328({ \
329 long __gu_err = -EFAULT, __gu_val = 0; \ 329 long __gu_err = -EFAULT, __gu_val = 0; \
330 const __typeof__(*(ptr)) *__gu_addr = (ptr); \ 330 const __typeof__(*(ptr)) *__gu_addr = (ptr); \
331 if (access_ok(VERIFY_READ,__gu_addr,size)) \ 331 if (access_ok(VERIFY_READ, __gu_addr, size)) \
332 __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \ 332 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
333 (x) = (__force __typeof__(*(ptr)))__gu_val; \ 333 (x) = (__force __typeof__(*(ptr)))__gu_val; \
334 __gu_err; \ 334 __gu_err; \
335}) 335})
336 336
337extern long __get_user_bad(void); 337extern long __get_user_bad(void);
338 338
339#define __get_user_size(x,ptr,size,retval) \ 339#define __get_user_size(x, ptr, size, retval) \
340do { \ 340do { \
341 int __cb; \ 341 int __cb; \
342 retval = 0; \ 342 retval = 0; \
343 switch (size) { \ 343 switch (size) { \
344 case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \ 344 case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\
345 case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \ 345 case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
346 case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \ 346 case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\
347 case 8: retval = __copy_from_user(&x,ptr,8); break; \ 347 case 8: retval = __copy_from_user(&x, ptr, 8); break; \
348 default: (x) = __get_user_bad(); \ 348 default: (x) = __get_user_bad(); \
349 } \ 349 } \
350} while (0) 350} while (0)
@@ -390,19 +390,19 @@ __asm__ __volatile__( \
390 */ 390 */
391 391
392extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n); 392extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
393#define __copy_user(to,from,size) __xtensa_copy_user(to,from,size) 393#define __copy_user(to, from, size) __xtensa_copy_user(to, from, size)
394 394
395 395
396static inline unsigned long 396static inline unsigned long
397__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n) 397__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
398{ 398{
399 return __copy_user(to,from,n); 399 return __copy_user(to, from, n);
400} 400}
401 401
402static inline unsigned long 402static inline unsigned long
403__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n) 403__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
404{ 404{
405 return __copy_user(to,from,n); 405 return __copy_user(to, from, n);
406} 406}
407 407
408static inline unsigned long 408static inline unsigned long
@@ -410,7 +410,7 @@ __generic_copy_to_user(void *to, const void *from, unsigned long n)
410{ 410{
411 prefetch(from); 411 prefetch(from);
412 if (access_ok(VERIFY_WRITE, to, n)) 412 if (access_ok(VERIFY_WRITE, to, n))
413 return __copy_user(to,from,n); 413 return __copy_user(to, from, n);
414 return n; 414 return n;
415} 415}
416 416
@@ -419,18 +419,18 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
419{ 419{
420 prefetchw(to); 420 prefetchw(to);
421 if (access_ok(VERIFY_READ, from, n)) 421 if (access_ok(VERIFY_READ, from, n))
422 return __copy_user(to,from,n); 422 return __copy_user(to, from, n);
423 else 423 else
424 memset(to, 0, n); 424 memset(to, 0, n);
425 return n; 425 return n;
426} 426}
427 427
428#define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n)) 428#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n))
429#define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n)) 429#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
430#define __copy_to_user(to,from,n) \ 430#define __copy_to_user(to, from, n) \
431 __generic_copy_to_user_nocheck((to),(from),(n)) 431 __generic_copy_to_user_nocheck((to), (from), (n))
432#define __copy_from_user(to,from,n) \ 432#define __copy_from_user(to, from, n) \
433 __generic_copy_from_user_nocheck((to),(from),(n)) 433 __generic_copy_from_user_nocheck((to), (from), (n))
434#define __copy_to_user_inatomic __copy_to_user 434#define __copy_to_user_inatomic __copy_to_user
435#define __copy_from_user_inatomic __copy_from_user 435#define __copy_from_user_inatomic __copy_from_user
436 436
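As a reminder of the calling convention these macros implement, the copy helpers return the number of bytes that could *not* be copied, so nonzero means a fault. A hypothetical kernel-style caller (an illustration, not part of this patch):

```c
#include <linux/uaccess.h>
#include <linux/errno.h>

static int example_fetch(void __user *uptr, int *out)
{
	int val;

	/* Nonzero return means some bytes were left uncopied. */
	if (copy_from_user(&val, uptr, sizeof(val)))
		return -EFAULT;

	*out = val;
	return 0;
}
```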
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 9273d0969ebd..5b9c6d5c3636 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1292,6 +1292,9 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
1292 struct blkg_rwstat rwstat = { }, tmp; 1292 struct blkg_rwstat rwstat = { }, tmp;
1293 int i, cpu; 1293 int i, cpu;
1294 1294
1295 if (tg->stats_cpu == NULL)
1296 return 0;
1297
1295 for_each_possible_cpu(cpu) { 1298 for_each_possible_cpu(cpu) {
1296 struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu); 1299 struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
1297 1300
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index b18cd2151ddb..623b117ad1a2 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -55,6 +55,7 @@ acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
55ifdef CONFIG_ACPI_VIDEO 55ifdef CONFIG_ACPI_VIDEO
56acpi-y += video_detect.o 56acpi-y += video_detect.o
57endif 57endif
58acpi-y += acpi_lpat.o
58 59
59# These are (potentially) separate modules 60# These are (potentially) separate modules
60 61
diff --git a/drivers/acpi/acpi_lpat.c b/drivers/acpi/acpi_lpat.c
new file mode 100644
index 000000000000..feb61c1630eb
--- /dev/null
+++ b/drivers/acpi/acpi_lpat.c
@@ -0,0 +1,161 @@
1/*
2 * acpi_lpat.c - LPAT table processing functions
3 *
4 * Copyright (C) 2015 Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/module.h>
17#include <linux/acpi.h>
18#include <acpi/acpi_lpat.h>
19
20/**
21 * acpi_lpat_raw_to_temp(): Return temperature from raw value through
22 * LPAT conversion table
23 *
24 * @lpat_table: the temperature_raw mapping table structure
26 * @raw: the raw value, used as a key to get the temperature from the
26 * above mapping table
27 *
28 * A positive converted temperature value will be returned on success,
29 * a negative errno will be returned in error cases.
30 */
31int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
32 int raw)
33{
34 int i, delta_temp, delta_raw, temp;
35 struct acpi_lpat *lpat = lpat_table->lpat;
36
37 for (i = 0; i < lpat_table->lpat_count - 1; i++) {
38 if ((raw >= lpat[i].raw && raw <= lpat[i+1].raw) ||
39 (raw <= lpat[i].raw && raw >= lpat[i+1].raw))
40 break;
41 }
42
43 if (i == lpat_table->lpat_count - 1)
44 return -ENOENT;
45
46 delta_temp = lpat[i+1].temp - lpat[i].temp;
47 delta_raw = lpat[i+1].raw - lpat[i].raw;
48 temp = lpat[i].temp + (raw - lpat[i].raw) * delta_temp / delta_raw;
49
50 return temp;
51}
52EXPORT_SYMBOL_GPL(acpi_lpat_raw_to_temp);
53
54/**
55 * acpi_lpat_temp_to_raw(): Return raw value from temperature through
56 * LPAT conversion table
57 *
58 * @lpat_table: the temperature_raw mapping table structure
59 * @temp: the temperature, used as a key to get the raw value from the
60 * above mapping table
61 *
62 * A positive converted temperature value will be returned on success,
63 * a negative errno will be returned in error cases.
64 */
65int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table,
66 int temp)
67{
68 int i, delta_temp, delta_raw, raw;
69 struct acpi_lpat *lpat = lpat_table->lpat;
70
71 for (i = 0; i < lpat_table->lpat_count - 1; i++) {
72 if (temp >= lpat[i].temp && temp <= lpat[i+1].temp)
73 break;
74 }
75
76 if (i == lpat_table->lpat_count - 1)
77 return -ENOENT;
78
79 delta_temp = lpat[i+1].temp - lpat[i].temp;
80 delta_raw = lpat[i+1].raw - lpat[i].raw;
81 raw = lpat[i].raw + (temp - lpat[i].temp) * delta_raw / delta_temp;
82
83 return raw;
84}
85EXPORT_SYMBOL_GPL(acpi_lpat_temp_to_raw);
86
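Both conversion helpers do plain linear interpolation between the two bracketing table entries. A self-contained sketch of the raw-to-temperature direction, with an assumed two-point table:

```c
#include <stdio.h>

struct acpi_lpat { int temp, raw; };

/* Same interpolation as acpi_lpat_raw_to_temp(), standalone. */
static int raw_to_temp(const struct acpi_lpat *lpat, int count, int raw)
{
	int i;

	for (i = 0; i < count - 1; i++)
		if ((raw >= lpat[i].raw && raw <= lpat[i + 1].raw) ||
		    (raw <= lpat[i].raw && raw >= lpat[i + 1].raw))
			break;
	if (i == count - 1)
		return -1;	/* stands in for -ENOENT */

	return lpat[i].temp + (raw - lpat[i].raw) *
	       (lpat[i + 1].temp - lpat[i].temp) /
	       (lpat[i + 1].raw - lpat[i].raw);
}

int main(void)
{
	/* Assumed table: raw 1000 means 0 degC, raw 2000 means 100 degC. */
	const struct acpi_lpat table[] = { { 0, 1000 }, { 100, 2000 } };

	printf("%d\n", raw_to_temp(table, 2, 1500));	/* prints 50 */
	return 0;
}
```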
87/**
88 * acpi_lpat_get_conversion_table(): Parse ACPI LPAT table if present.
89 *
90 * @handle: Handle to acpi device
91 *
92 * Parse LPAT table to a struct of type acpi_lpat_table. On success
93 * it returns a pointer to newly allocated table. This table must
94 * be freed by the caller when finished processing, using a call to
95 * acpi_lpat_free_conversion_table.
96 */
97struct acpi_lpat_conversion_table *acpi_lpat_get_conversion_table(acpi_handle
98 handle)
99{
100 struct acpi_lpat_conversion_table *lpat_table = NULL;
101 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
102 union acpi_object *obj_p, *obj_e;
103 int *lpat, i;
104 acpi_status status;
105
106 status = acpi_evaluate_object(handle, "LPAT", NULL, &buffer);
107 if (ACPI_FAILURE(status))
108 return NULL;
109
110 obj_p = (union acpi_object *)buffer.pointer;
111 if (!obj_p || (obj_p->type != ACPI_TYPE_PACKAGE) ||
112 (obj_p->package.count % 2) || (obj_p->package.count < 4))
113 goto out;
114
115 lpat = kcalloc(obj_p->package.count, sizeof(int), GFP_KERNEL);
116 if (!lpat)
117 goto out;
118
119 for (i = 0; i < obj_p->package.count; i++) {
120 obj_e = &obj_p->package.elements[i];
121 if (obj_e->type != ACPI_TYPE_INTEGER) {
122 kfree(lpat);
123 goto out;
124 }
125 lpat[i] = (s64)obj_e->integer.value;
126 }
127
128 lpat_table = kzalloc(sizeof(*lpat_table), GFP_KERNEL);
129 if (!lpat_table) {
130 kfree(lpat);
131 goto out;
132 }
133
134 lpat_table->lpat = (struct acpi_lpat *)lpat;
135 lpat_table->lpat_count = obj_p->package.count / 2;
136
137out:
138 kfree(buffer.pointer);
139 return lpat_table;
140}
141EXPORT_SYMBOL_GPL(acpi_lpat_get_conversion_table);
142
143/**
144 * acpi_lpat_free_conversion_table(): Free LPAT table.
145 *
146 * @lpat_table: the temperature_raw mapping table structure
147 *
148 * Frees the LPAT table previously allocated by a call to
149 * acpi_lpat_get_conversion_table.
150 */
151void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table
152 *lpat_table)
153{
154 if (lpat_table) {
155 kfree(lpat_table->lpat);
156 kfree(lpat_table);
157 }
158}
159EXPORT_SYMBOL_GPL(acpi_lpat_free_conversion_table);
160
161MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 02e835f3cf8a..657964e8ab7e 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -105,7 +105,7 @@ static void lpss_uart_setup(struct lpss_private_data *pdata)
105 } 105 }
106} 106}
107 107
108static void byt_i2c_setup(struct lpss_private_data *pdata) 108static void lpss_deassert_reset(struct lpss_private_data *pdata)
109{ 109{
110 unsigned int offset; 110 unsigned int offset;
111 u32 val; 111 u32 val;
@@ -114,9 +114,18 @@ static void byt_i2c_setup(struct lpss_private_data *pdata)
114 val = readl(pdata->mmio_base + offset); 114 val = readl(pdata->mmio_base + offset);
115 val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC; 115 val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
116 writel(val, pdata->mmio_base + offset); 116 writel(val, pdata->mmio_base + offset);
117}
118
119#define LPSS_I2C_ENABLE 0x6c
120
121static void byt_i2c_setup(struct lpss_private_data *pdata)
122{
123 lpss_deassert_reset(pdata);
117 124
118 if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset)) 125 if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
119 pdata->fixed_clk_rate = 133000000; 126 pdata->fixed_clk_rate = 133000000;
127
128 writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
120} 129}
121 130
122static struct lpss_device_desc lpt_dev_desc = { 131static struct lpss_device_desc lpt_dev_desc = {
@@ -125,7 +134,7 @@ static struct lpss_device_desc lpt_dev_desc = {
125}; 134};
126 135
127static struct lpss_device_desc lpt_i2c_dev_desc = { 136static struct lpss_device_desc lpt_i2c_dev_desc = {
128 .flags = LPSS_CLK | LPSS_LTR, 137 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
129 .prv_offset = 0x800, 138 .prv_offset = 0x800,
130}; 139};
131 140
@@ -166,6 +175,12 @@ static struct lpss_device_desc byt_i2c_dev_desc = {
166 .setup = byt_i2c_setup, 175 .setup = byt_i2c_setup,
167}; 176};
168 177
178static struct lpss_device_desc bsw_spi_dev_desc = {
179 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
180 .prv_offset = 0x400,
181 .setup = lpss_deassert_reset,
182};
183
169#else 184#else
170 185
171#define LPSS_ADDR(desc) (0UL) 186#define LPSS_ADDR(desc) (0UL)
@@ -198,7 +213,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
198 /* Braswell LPSS devices */ 213 /* Braswell LPSS devices */
199 { "80862288", LPSS_ADDR(byt_pwm_dev_desc) }, 214 { "80862288", LPSS_ADDR(byt_pwm_dev_desc) },
200 { "8086228A", LPSS_ADDR(byt_uart_dev_desc) }, 215 { "8086228A", LPSS_ADDR(byt_uart_dev_desc) },
201 { "8086228E", LPSS_ADDR(byt_spi_dev_desc) }, 216 { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
202 { "808622C1", LPSS_ADDR(byt_i2c_dev_desc) }, 217 { "808622C1", LPSS_ADDR(byt_i2c_dev_desc) },
203 218
204 { "INT3430", LPSS_ADDR(lpt_dev_desc) }, 219 { "INT3430", LPSS_ADDR(lpt_dev_desc) },
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 982b67faaaf3..a8dd2f763382 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -680,7 +680,7 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
680 /* Enable GPE for event processing (SCI_EVT=1) */ 680 /* Enable GPE for event processing (SCI_EVT=1) */
681 if (!resuming) 681 if (!resuming)
682 acpi_ec_submit_request(ec); 682 acpi_ec_submit_request(ec);
683 pr_info("+++++ EC started +++++\n"); 683 pr_debug("EC started\n");
684 } 684 }
685 spin_unlock_irqrestore(&ec->lock, flags); 685 spin_unlock_irqrestore(&ec->lock, flags);
686} 686}
@@ -712,7 +712,7 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
712 acpi_ec_complete_request(ec); 712 acpi_ec_complete_request(ec);
713 clear_bit(EC_FLAGS_STARTED, &ec->flags); 713 clear_bit(EC_FLAGS_STARTED, &ec->flags);
714 clear_bit(EC_FLAGS_STOPPED, &ec->flags); 714 clear_bit(EC_FLAGS_STOPPED, &ec->flags);
715 pr_info("+++++ EC stopped +++++\n"); 715 pr_debug("EC stopped\n");
716 } 716 }
717 spin_unlock_irqrestore(&ec->lock, flags); 717 spin_unlock_irqrestore(&ec->lock, flags);
718} 718}
diff --git a/drivers/acpi/pmic/intel_pmic.c b/drivers/acpi/pmic/intel_pmic.c
index a732e5d7e322..bd772cd56494 100644
--- a/drivers/acpi/pmic/intel_pmic.c
+++ b/drivers/acpi/pmic/intel_pmic.c
@@ -16,20 +16,15 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/acpi.h> 17#include <linux/acpi.h>
18#include <linux/regmap.h> 18#include <linux/regmap.h>
19#include <acpi/acpi_lpat.h>
19#include "intel_pmic.h" 20#include "intel_pmic.h"
20 21
21#define PMIC_POWER_OPREGION_ID 0x8d 22#define PMIC_POWER_OPREGION_ID 0x8d
22#define PMIC_THERMAL_OPREGION_ID 0x8c 23#define PMIC_THERMAL_OPREGION_ID 0x8c
23 24
24struct acpi_lpat {
25 int temp;
26 int raw;
27};
28
29struct intel_pmic_opregion { 25struct intel_pmic_opregion {
30 struct mutex lock; 26 struct mutex lock;
31 struct acpi_lpat *lpat; 27 struct acpi_lpat_conversion_table *lpat_table;
32 int lpat_count;
33 struct regmap *regmap; 28 struct regmap *regmap;
34 struct intel_pmic_opregion_data *data; 29 struct intel_pmic_opregion_data *data;
35}; 30};
@@ -50,105 +45,6 @@ static int pmic_get_reg_bit(int address, struct pmic_table *table,
50 return -ENOENT; 45 return -ENOENT;
51} 46}
52 47
53/**
54 * raw_to_temp(): Return temperature from raw value through LPAT table
55 *
56 * @lpat: the temperature_raw mapping table
57 * @count: the count of the above mapping table
58 * @raw: the raw value, used as a key to get the temerature from the
59 * above mapping table
60 *
61 * A positive value will be returned on success, a negative errno will
62 * be returned in error cases.
63 */
64static int raw_to_temp(struct acpi_lpat *lpat, int count, int raw)
65{
66 int i, delta_temp, delta_raw, temp;
67
68 for (i = 0; i < count - 1; i++) {
69 if ((raw >= lpat[i].raw && raw <= lpat[i+1].raw) ||
70 (raw <= lpat[i].raw && raw >= lpat[i+1].raw))
71 break;
72 }
73
74 if (i == count - 1)
75 return -ENOENT;
76
77 delta_temp = lpat[i+1].temp - lpat[i].temp;
78 delta_raw = lpat[i+1].raw - lpat[i].raw;
79 temp = lpat[i].temp + (raw - lpat[i].raw) * delta_temp / delta_raw;
80
81 return temp;
82}
83
84/**
85 * temp_to_raw(): Return raw value from temperature through LPAT table
86 *
87 * @lpat: the temperature_raw mapping table
88 * @count: the count of the above mapping table
89 * @temp: the temperature, used as a key to get the raw value from the
90 * above mapping table
91 *
92 * A positive value will be returned on success, a negative errno will
93 * be returned in error cases.
94 */
95static int temp_to_raw(struct acpi_lpat *lpat, int count, int temp)
96{
97 int i, delta_temp, delta_raw, raw;
98
99 for (i = 0; i < count - 1; i++) {
100 if (temp >= lpat[i].temp && temp <= lpat[i+1].temp)
101 break;
102 }
103
104 if (i == count - 1)
105 return -ENOENT;
106
107 delta_temp = lpat[i+1].temp - lpat[i].temp;
108 delta_raw = lpat[i+1].raw - lpat[i].raw;
109 raw = lpat[i].raw + (temp - lpat[i].temp) * delta_raw / delta_temp;
110
111 return raw;
112}
113
114static void pmic_thermal_lpat(struct intel_pmic_opregion *opregion,
115 acpi_handle handle, struct device *dev)
116{
117 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
118 union acpi_object *obj_p, *obj_e;
119 int *lpat, i;
120 acpi_status status;
121
122 status = acpi_evaluate_object(handle, "LPAT", NULL, &buffer);
123 if (ACPI_FAILURE(status))
124 return;
125
126 obj_p = (union acpi_object *)buffer.pointer;
127 if (!obj_p || (obj_p->type != ACPI_TYPE_PACKAGE) ||
128 (obj_p->package.count % 2) || (obj_p->package.count < 4))
129 goto out;
130
131 lpat = devm_kmalloc(dev, sizeof(int) * obj_p->package.count,
132 GFP_KERNEL);
133 if (!lpat)
134 goto out;
135
136 for (i = 0; i < obj_p->package.count; i++) {
137 obj_e = &obj_p->package.elements[i];
138 if (obj_e->type != ACPI_TYPE_INTEGER) {
139 devm_kfree(dev, lpat);
140 goto out;
141 }
142 lpat[i] = (s64)obj_e->integer.value;
143 }
144
145 opregion->lpat = (struct acpi_lpat *)lpat;
146 opregion->lpat_count = obj_p->package.count / 2;
147
148out:
149 kfree(buffer.pointer);
150}
151
152static acpi_status intel_pmic_power_handler(u32 function, 48static acpi_status intel_pmic_power_handler(u32 function,
153 acpi_physical_address address, u32 bits, u64 *value64, 49 acpi_physical_address address, u32 bits, u64 *value64,
154 void *handler_context, void *region_context) 50 void *handler_context, void *region_context)
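The two helpers deleted above both do piecewise-linear interpolation over the LPAT (temperature, raw) pairs; the patch moves them into the shared acpi_lpat library rather than changing the math. A self-contained user-space rendering of the raw-to-temperature direction, assuming a table ordered the way the ACPI LPAT package supplies it:

#include <stdio.h>

struct lpat_entry { int temp; int raw; };

/* Find the two table entries that bracket 'raw', then interpolate
 * linearly between them; returns a negative value when no pair of
 * neighbours brackets the input, as the removed raw_to_temp() did. */
static int raw_to_temp(const struct lpat_entry *lpat, int count, int raw)
{
	int i;

	for (i = 0; i < count - 1; i++) {
		if ((raw >= lpat[i].raw && raw <= lpat[i + 1].raw) ||
		    (raw <= lpat[i].raw && raw >= lpat[i + 1].raw))
			break;
	}
	if (i == count - 1)
		return -1;

	return lpat[i].temp + (raw - lpat[i].raw) *
	       (lpat[i + 1].temp - lpat[i].temp) /
	       (lpat[i + 1].raw - lpat[i].raw);
}

int main(void)
{
	/* Example table: raw ADC codes against tenths of a degree. */
	struct lpat_entry t[] = { { 200, 100 }, { 300, 200 }, { 400, 350 } };

	printf("%d\n", raw_to_temp(t, 3, 150));	/* halfway: prints 250 */
	return 0;
}

temp_to_raw() is the same walk with the roles of .temp and .raw exchanged.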
@@ -192,12 +88,12 @@ static int pmic_read_temp(struct intel_pmic_opregion *opregion,
192 if (raw_temp < 0) 88 if (raw_temp < 0)
193 return raw_temp; 89 return raw_temp;
194 90
195 if (!opregion->lpat) { 91 if (!opregion->lpat_table) {
196 *value = raw_temp; 92 *value = raw_temp;
197 return 0; 93 return 0;
198 } 94 }
199 95
200 temp = raw_to_temp(opregion->lpat, opregion->lpat_count, raw_temp); 96 temp = acpi_lpat_raw_to_temp(opregion->lpat_table, raw_temp);
201 if (temp < 0) 97 if (temp < 0)
202 return temp; 98 return temp;
203 99
@@ -223,9 +119,8 @@ static int pmic_thermal_aux(struct intel_pmic_opregion *opregion, int reg,
223 if (!opregion->data->update_aux) 119 if (!opregion->data->update_aux)
224 return -ENXIO; 120 return -ENXIO;
225 121
226 if (opregion->lpat) { 122 if (opregion->lpat_table) {
227 raw_temp = temp_to_raw(opregion->lpat, opregion->lpat_count, 123 raw_temp = acpi_lpat_temp_to_raw(opregion->lpat_table, *value);
228 *value);
229 if (raw_temp < 0) 124 if (raw_temp < 0)
230 return raw_temp; 125 return raw_temp;
231 } else { 126 } else {
@@ -314,6 +209,7 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
314{ 209{
315 acpi_status status; 210 acpi_status status;
316 struct intel_pmic_opregion *opregion; 211 struct intel_pmic_opregion *opregion;
212 int ret;
317 213
318 if (!dev || !regmap || !d) 214 if (!dev || !regmap || !d)
319 return -EINVAL; 215 return -EINVAL;
@@ -327,14 +223,16 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
327 223
328 mutex_init(&opregion->lock); 224 mutex_init(&opregion->lock);
329 opregion->regmap = regmap; 225 opregion->regmap = regmap;
330 pmic_thermal_lpat(opregion, handle, dev); 226 opregion->lpat_table = acpi_lpat_get_conversion_table(handle);
331 227
332 status = acpi_install_address_space_handler(handle, 228 status = acpi_install_address_space_handler(handle,
333 PMIC_POWER_OPREGION_ID, 229 PMIC_POWER_OPREGION_ID,
334 intel_pmic_power_handler, 230 intel_pmic_power_handler,
335 NULL, opregion); 231 NULL, opregion);
336 if (ACPI_FAILURE(status)) 232 if (ACPI_FAILURE(status)) {
337 return -ENODEV; 233 ret = -ENODEV;
234 goto out_error;
235 }
338 236
339 status = acpi_install_address_space_handler(handle, 237 status = acpi_install_address_space_handler(handle,
340 PMIC_THERMAL_OPREGION_ID, 238 PMIC_THERMAL_OPREGION_ID,
@@ -343,11 +241,16 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
343 if (ACPI_FAILURE(status)) { 241 if (ACPI_FAILURE(status)) {
344 acpi_remove_address_space_handler(handle, PMIC_POWER_OPREGION_ID, 242 acpi_remove_address_space_handler(handle, PMIC_POWER_OPREGION_ID,
345 intel_pmic_power_handler); 243 intel_pmic_power_handler);
346 return -ENODEV; 244 ret = -ENODEV;
245 goto out_error;
347 } 246 }
348 247
349 opregion->data = d; 248 opregion->data = d;
350 return 0; 249 return 0;
250
251out_error:
252 acpi_lpat_free_conversion_table(opregion->lpat_table);
253 return ret;
351} 254}
352EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler); 255EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler);
353 256
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 4752b9939987..c723668e3e27 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -46,7 +46,7 @@ static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
46 if (len && reslen && reslen == len && start <= end) 46 if (len && reslen && reslen == len && start <= end)
47 return true; 47 return true;
48 48
49 pr_info("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n", 49 pr_debug("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
50 io ? "io" : "mem", start, end, len); 50 io ? "io" : "mem", start, end, len);
51 51
52 return false; 52 return false;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 88a4f99dd2a7..debd30917010 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -540,6 +540,15 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
540 DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"), 540 DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"),
541 }, 541 },
542 }, 542 },
543 {
544 /* https://bugs.freedesktop.org/show_bug.cgi?id=87286 */
545 .callback = video_disable_native_backlight,
546 .ident = "SAMSUNG 900X3C/900X3D/900X3E/900X4C/900X4D",
547 .matches = {
548 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
549 DMI_MATCH(DMI_PRODUCT_NAME, "900X3C/900X3D/900X3E/900X4C/900X4D"),
550 },
551 },
543 552
544 { 553 {
545 /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */ 554 /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index cbdfbbf98392..ceb32dd52a6c 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -37,17 +37,18 @@
37#include <linux/ptrace.h> 37#include <linux/ptrace.h>
38#include <linux/sched.h> 38#include <linux/sched.h>
39#include <linux/slab.h> 39#include <linux/slab.h>
40#include <linux/t10-pi.h>
40#include <linux/types.h> 41#include <linux/types.h>
41#include <scsi/sg.h> 42#include <scsi/sg.h>
42#include <asm-generic/io-64-nonatomic-lo-hi.h> 43#include <asm-generic/io-64-nonatomic-lo-hi.h>
43 44
45#define NVME_MINORS (1U << MINORBITS)
44#define NVME_Q_DEPTH 1024 46#define NVME_Q_DEPTH 1024
45#define NVME_AQ_DEPTH 64 47#define NVME_AQ_DEPTH 64
46#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) 48#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
47#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) 49#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
48#define ADMIN_TIMEOUT (admin_timeout * HZ) 50#define ADMIN_TIMEOUT (admin_timeout * HZ)
49#define SHUTDOWN_TIMEOUT (shutdown_timeout * HZ) 51#define SHUTDOWN_TIMEOUT (shutdown_timeout * HZ)
50#define IOD_TIMEOUT (retry_time * HZ)
51 52
52static unsigned char admin_timeout = 60; 53static unsigned char admin_timeout = 60;
53module_param(admin_timeout, byte, 0644); 54module_param(admin_timeout, byte, 0644);
@@ -57,10 +58,6 @@ unsigned char nvme_io_timeout = 30;
57module_param_named(io_timeout, nvme_io_timeout, byte, 0644); 58module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
58MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O"); 59MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
59 60
60static unsigned char retry_time = 30;
61module_param(retry_time, byte, 0644);
62MODULE_PARM_DESC(retry_time, "time in seconds to retry failed I/O");
63
64static unsigned char shutdown_timeout = 5; 61static unsigned char shutdown_timeout = 5;
65module_param(shutdown_timeout, byte, 0644); 62module_param(shutdown_timeout, byte, 0644);
66MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown"); 63MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
@@ -68,6 +65,9 @@ MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown")
68static int nvme_major; 65static int nvme_major;
69module_param(nvme_major, int, 0); 66module_param(nvme_major, int, 0);
70 67
68static int nvme_char_major;
69module_param(nvme_char_major, int, 0);
70
71static int use_threaded_interrupts; 71static int use_threaded_interrupts;
72module_param(use_threaded_interrupts, int, 0); 72module_param(use_threaded_interrupts, int, 0);
73 73
@@ -76,7 +76,8 @@ static LIST_HEAD(dev_list);
76static struct task_struct *nvme_thread; 76static struct task_struct *nvme_thread;
77static struct workqueue_struct *nvme_workq; 77static struct workqueue_struct *nvme_workq;
78static wait_queue_head_t nvme_kthread_wait; 78static wait_queue_head_t nvme_kthread_wait;
79static struct notifier_block nvme_nb; 79
80static struct class *nvme_class;
80 81
81static void nvme_reset_failed_dev(struct work_struct *ws); 82static void nvme_reset_failed_dev(struct work_struct *ws);
82static int nvme_process_cq(struct nvme_queue *nvmeq); 83static int nvme_process_cq(struct nvme_queue *nvmeq);
@@ -95,7 +96,6 @@ struct async_cmd_info {
95 * commands and one for I/O commands). 96 * commands and one for I/O commands).
96 */ 97 */
97struct nvme_queue { 98struct nvme_queue {
98 struct llist_node node;
99 struct device *q_dmadev; 99 struct device *q_dmadev;
100 struct nvme_dev *dev; 100 struct nvme_dev *dev;
101 char irqname[24]; /* nvme4294967295-65535\0 */ 101 char irqname[24]; /* nvme4294967295-65535\0 */
@@ -482,6 +482,115 @@ static int nvme_error_status(u16 status)
482 } 482 }
483} 483}
484 484
485#ifdef CONFIG_BLK_DEV_INTEGRITY
486static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
487{
488 if (be32_to_cpu(pi->ref_tag) == v)
489 pi->ref_tag = cpu_to_be32(p);
490}
491
492static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
493{
494 if (be32_to_cpu(pi->ref_tag) == p)
495 pi->ref_tag = cpu_to_be32(v);
496}
497
498/**
499 * nvme_dif_remap - remaps ref tags to bip seed and physical lba
500 *
501 * The virtual start sector is the one that was originally submitted by the
502 * block layer. Due to partitioning, MD/DM cloning, etc., the actual physical
503 * start sector may be different. Remap protection information to match the
504 * physical LBA on writes, and back to the original seed on reads.
505 *
506 * Types 0 and 3 do not have a ref tag, so no remapping is required.
507 */
508static void nvme_dif_remap(struct request *req,
509 void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
510{
511 struct nvme_ns *ns = req->rq_disk->private_data;
512 struct bio_integrity_payload *bip;
513 struct t10_pi_tuple *pi;
514 void *p, *pmap;
515 u32 i, nlb, ts, phys, virt;
516
517 if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
518 return;
519
520 bip = bio_integrity(req->bio);
521 if (!bip)
522 return;
523
524 pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
525 if (!pmap)
526 return;
527
528 p = pmap;
529 virt = bip_get_seed(bip);
530 phys = nvme_block_nr(ns, blk_rq_pos(req));
531 nlb = (blk_rq_bytes(req) >> ns->lba_shift);
532 ts = ns->disk->integrity->tuple_size;
533
534 for (i = 0; i < nlb; i++, virt++, phys++) {
535 pi = (struct t10_pi_tuple *)p;
536 dif_swap(phys, virt, pi);
537 p += ts;
538 }
539 kunmap_atomic(pmap);
540}
541
542static int nvme_noop_verify(struct blk_integrity_iter *iter)
543{
544 return 0;
545}
546
547static int nvme_noop_generate(struct blk_integrity_iter *iter)
548{
549 return 0;
550}
551
552struct blk_integrity nvme_meta_noop = {
553 .name = "NVME_META_NOOP",
554 .generate_fn = nvme_noop_generate,
555 .verify_fn = nvme_noop_verify,
556};
557
558static void nvme_init_integrity(struct nvme_ns *ns)
559{
560 struct blk_integrity integrity;
561
562 switch (ns->pi_type) {
563 case NVME_NS_DPS_PI_TYPE3:
564 integrity = t10_pi_type3_crc;
565 break;
566 case NVME_NS_DPS_PI_TYPE1:
567 case NVME_NS_DPS_PI_TYPE2:
568 integrity = t10_pi_type1_crc;
569 break;
570 default:
571 integrity = nvme_meta_noop;
572 break;
573 }
574 integrity.tuple_size = ns->ms;
575 blk_integrity_register(ns->disk, &integrity);
576 blk_queue_max_integrity_segments(ns->queue, 1);
577}
578#else /* CONFIG_BLK_DEV_INTEGRITY */
579static void nvme_dif_remap(struct request *req,
580 void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
581{
582}
583static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
584{
585}
586static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
587{
588}
589static void nvme_init_integrity(struct nvme_ns *ns)
590{
591}
592#endif
593
485static void req_completion(struct nvme_queue *nvmeq, void *ctx, 594static void req_completion(struct nvme_queue *nvmeq, void *ctx,
486 struct nvme_completion *cqe) 595 struct nvme_completion *cqe)
487{ 596{
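As the kerneldoc above describes, each 8-byte T10 PI tuple carries a big-endian ref tag that must hold the physical LBA on the wire but the submitter's virtual seed in memory. A standalone sketch of that per-tuple swap, using htonl/ntohl in place of the kernel's cpu_to_be32/be32_to_cpu (the walk is the one nvme_dif_remap() performs over the integrity buffer):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Layout of one T10 protection-information tuple (8 bytes). */
struct t10_pi_tuple {
	uint16_t guard_tag;	/* CRC of the data block */
	uint16_t app_tag;
	uint32_t ref_tag;	/* big-endian LBA, the field being remapped */
};

/* Rewrite ref tags from 'from' to 'to' across nlb consecutive tuples. */
static void remap(struct t10_pi_tuple *pi, uint32_t nlb,
		  uint32_t from, uint32_t to)
{
	for (uint32_t i = 0; i < nlb; i++, from++, to++)
		if (ntohl(pi[i].ref_tag) == from)
			pi[i].ref_tag = htonl(to);
}

int main(void)
{
	struct t10_pi_tuple pi[2] = {
		{ 0, 0, htonl(8) }, { 0, 0, htonl(9) },
	};

	remap(pi, 2, 8, 4096);	/* virtual seed 8 -> physical LBA 4096 */
	printf("%u %u\n", ntohl(pi[0].ref_tag), ntohl(pi[1].ref_tag));
	return 0;
}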
@@ -512,9 +621,16 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
512 "completing aborted command with status:%04x\n", 621 "completing aborted command with status:%04x\n",
513 status); 622 status);
514 623
515 if (iod->nents) 624 if (iod->nents) {
516 dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, iod->nents, 625 dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, iod->nents,
517 rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 626 rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
627 if (blk_integrity_rq(req)) {
628 if (!rq_data_dir(req))
629 nvme_dif_remap(req, nvme_dif_complete);
630 dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->meta_sg, 1,
631 rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
632 }
633 }
518 nvme_free_iod(nvmeq->dev, iod); 634 nvme_free_iod(nvmeq->dev, iod);
519 635
520 blk_mq_complete_request(req); 636 blk_mq_complete_request(req);
@@ -670,6 +786,24 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
670 cmnd->rw.prp2 = cpu_to_le64(iod->first_dma); 786 cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
671 cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); 787 cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
672 cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); 788 cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
789
790 if (blk_integrity_rq(req)) {
791 cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg));
792 switch (ns->pi_type) {
793 case NVME_NS_DPS_PI_TYPE3:
794 control |= NVME_RW_PRINFO_PRCHK_GUARD;
795 break;
796 case NVME_NS_DPS_PI_TYPE1:
797 case NVME_NS_DPS_PI_TYPE2:
798 control |= NVME_RW_PRINFO_PRCHK_GUARD |
799 NVME_RW_PRINFO_PRCHK_REF;
800 cmnd->rw.reftag = cpu_to_le32(
801 nvme_block_nr(ns, blk_rq_pos(req)));
802 break;
803 }
804 } else if (ns->ms)
805 control |= NVME_RW_PRINFO_PRACT;
806
673 cmnd->rw.control = cpu_to_le16(control); 807 cmnd->rw.control = cpu_to_le16(control);
674 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); 808 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
675 809
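A compact restatement of the control-word logic in this hunk: guard checking is enabled for every PI type, ref-tag checking only for types 1 and 2, and PRACT only when the host supplies no integrity buffer at all. The bit positions below follow the driver's 16-bit control field, but treat them as illustrative; the authoritative encodings live in the NVMe spec.

#include <stdint.h>
#include <stdio.h>

enum { PI_NONE, PI_TYPE1, PI_TYPE2, PI_TYPE3 };

#define PRCHK_REF   (1u << 10)
#define PRCHK_GUARD (1u << 12)
#define PRACT       (1u << 13)

static uint16_t rw_control(int pi_type, unsigned ms, int have_buffer)
{
	uint16_t control = 0;

	if (have_buffer) {
		switch (pi_type) {
		case PI_TYPE3:
			control |= PRCHK_GUARD;
			break;
		case PI_TYPE1:
		case PI_TYPE2:
			control |= PRCHK_GUARD | PRCHK_REF;
			break;
		}
	} else if (ms) {
		control |= PRACT;	/* controller strips/generates metadata */
	}
	return control;
}

int main(void)
{
	printf("0x%04x\n", rw_control(PI_TYPE1, 8, 1));	/* 0x1400 */
	return 0;
}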
@@ -690,6 +824,19 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
690 struct nvme_iod *iod; 824 struct nvme_iod *iod;
691 enum dma_data_direction dma_dir; 825 enum dma_data_direction dma_dir;
692 826
827 /*
 828 * If formatted with metadata, require that the block layer provide a
 829 * buffer unless this namespace is formatted such that the metadata can
 830 * be stripped/generated by the controller with PRACT=1.
831 */
832 if (ns->ms && !blk_integrity_rq(req)) {
833 if (!(ns->pi_type && ns->ms == 8)) {
834 req->errors = -EFAULT;
835 blk_mq_complete_request(req);
836 return BLK_MQ_RQ_QUEUE_OK;
837 }
838 }
839
693 iod = nvme_alloc_iod(req, ns->dev, GFP_ATOMIC); 840 iod = nvme_alloc_iod(req, ns->dev, GFP_ATOMIC);
694 if (!iod) 841 if (!iod)
695 return BLK_MQ_RQ_QUEUE_BUSY; 842 return BLK_MQ_RQ_QUEUE_BUSY;
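The gate added above reduces to a small predicate: a request arriving without an integrity buffer is acceptable only when the namespace has no metadata, or when the metadata is exactly the 8-byte PI tuple the controller can strip/generate itself. A hedged restatement:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the check in nvme_queue_rq(); 8 == sizeof(struct t10_pi_tuple). */
static bool ok_without_integrity_buffer(unsigned ms, int pi_type)
{
	return ms == 0 || (pi_type && ms == 8);
}

int main(void)
{
	printf("%d %d %d\n",
	       ok_without_integrity_buffer(0, 0),	/* no metadata: 1 */
	       ok_without_integrity_buffer(8, 1),	/* PRACT-capable: 1 */
	       ok_without_integrity_buffer(16, 0));	/* needs a buffer: 0 */
	return 0;
}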
@@ -725,6 +872,21 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
725 iod->nents, dma_dir); 872 iod->nents, dma_dir);
726 goto retry_cmd; 873 goto retry_cmd;
727 } 874 }
875 if (blk_integrity_rq(req)) {
876 if (blk_rq_count_integrity_sg(req->q, req->bio) != 1)
877 goto error_cmd;
878
879 sg_init_table(iod->meta_sg, 1);
880 if (blk_rq_map_integrity_sg(
881 req->q, req->bio, iod->meta_sg) != 1)
882 goto error_cmd;
883
884 if (rq_data_dir(req))
885 nvme_dif_remap(req, nvme_dif_prep);
886
887 if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir))
888 goto error_cmd;
889 }
728 } 890 }
729 891
730 nvme_set_info(cmd, iod, req_completion); 892 nvme_set_info(cmd, iod, req_completion);
@@ -817,14 +979,6 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
817 return IRQ_WAKE_THREAD; 979 return IRQ_WAKE_THREAD;
818} 980}
819 981
820static void nvme_abort_cmd_info(struct nvme_queue *nvmeq, struct nvme_cmd_info *
821 cmd_info)
822{
823 spin_lock_irq(&nvmeq->q_lock);
824 cancel_cmd_info(cmd_info, NULL);
825 spin_unlock_irq(&nvmeq->q_lock);
826}
827
828struct sync_cmd_info { 982struct sync_cmd_info {
829 struct task_struct *task; 983 struct task_struct *task;
830 u32 result; 984 u32 result;
@@ -847,7 +1001,6 @@ static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
847static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd, 1001static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd,
848 u32 *result, unsigned timeout) 1002 u32 *result, unsigned timeout)
849{ 1003{
850 int ret;
851 struct sync_cmd_info cmdinfo; 1004 struct sync_cmd_info cmdinfo;
852 struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req); 1005 struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
853 struct nvme_queue *nvmeq = cmd_rq->nvmeq; 1006 struct nvme_queue *nvmeq = cmd_rq->nvmeq;
@@ -859,29 +1012,12 @@ static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd,
859 1012
860 nvme_set_info(cmd_rq, &cmdinfo, sync_completion); 1013 nvme_set_info(cmd_rq, &cmdinfo, sync_completion);
861 1014
862 set_current_state(TASK_KILLABLE); 1015 set_current_state(TASK_UNINTERRUPTIBLE);
863 ret = nvme_submit_cmd(nvmeq, cmd); 1016 nvme_submit_cmd(nvmeq, cmd);
864 if (ret) { 1017 schedule();
865 nvme_finish_cmd(nvmeq, req->tag, NULL);
866 set_current_state(TASK_RUNNING);
867 }
868 ret = schedule_timeout(timeout);
869
870 /*
871 * Ensure that sync_completion has either run, or that it will
872 * never run.
873 */
874 nvme_abort_cmd_info(nvmeq, blk_mq_rq_to_pdu(req));
875
876 /*
877 * We never got the completion
878 */
879 if (cmdinfo.status == -EINTR)
880 return -EINTR;
881 1018
882 if (result) 1019 if (result)
883 *result = cmdinfo.result; 1020 *result = cmdinfo.result;
884
885 return cmdinfo.status; 1021 return cmdinfo.status;
886} 1022}
887 1023
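The rewritten submit path parks the caller with set_current_state()/schedule() and relies on sync_completion() to wake it. A kernel-style sketch of just that handshake, with the context reduced to what the wakeup needs (sync_done stands in for the driver's sync_completion):

struct sync_ctx {
	struct task_struct *task;
	u32 result;
	int status;
};

/* submitter */
static int submit_and_wait(struct nvme_queue *nvmeq, struct nvme_command *cmd,
			   struct sync_ctx *ctx)
{
	ctx->task = current;
	set_current_state(TASK_UNINTERRUPTIBLE);	/* before submit */
	nvme_submit_cmd(nvmeq, cmd);
	schedule();				/* woken by the completion */
	return ctx->status;
}

/* completion side */
static void sync_done(struct sync_ctx *ctx, int status, u32 result)
{
	ctx->status = status;
	ctx->result = result;
	wake_up_process(ctx->task);
}

Setting the task state before nvme_submit_cmd() is what prevents a lost wakeup: if the completion fires before schedule() runs, wake_up_process() has already marked the task runnable, so schedule() returns promptly instead of sleeping forever.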
@@ -1158,29 +1294,18 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
1158 struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req); 1294 struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
1159 struct nvme_queue *nvmeq = cmd->nvmeq; 1295 struct nvme_queue *nvmeq = cmd->nvmeq;
1160 1296
1161 /*
1162 * The aborted req will be completed on receiving the abort req.
1163 * We enable the timer again. If hit twice, it'll cause a device reset,
1164 * as the device then is in a faulty state.
1165 */
1166 int ret = BLK_EH_RESET_TIMER;
1167
1168 dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag, 1297 dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
1169 nvmeq->qid); 1298 nvmeq->qid);
1170
1171 spin_lock_irq(&nvmeq->q_lock); 1299 spin_lock_irq(&nvmeq->q_lock);
1172 if (!nvmeq->dev->initialized) { 1300 nvme_abort_req(req);
1173 /*
1174 * Force cancelled command frees the request, which requires we
1175 * return BLK_EH_NOT_HANDLED.
1176 */
1177 nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
1178 ret = BLK_EH_NOT_HANDLED;
1179 } else
1180 nvme_abort_req(req);
1181 spin_unlock_irq(&nvmeq->q_lock); 1301 spin_unlock_irq(&nvmeq->q_lock);
1182 1302
1183 return ret; 1303 /*
1304 * The aborted req will be completed on receiving the abort req.
1305 * We enable the timer again. If hit twice, it'll cause a device reset,
1306 * as the device is then in a faulty state.
1307 */
1308 return BLK_EH_RESET_TIMER;
1184} 1309}
1185 1310
1186static void nvme_free_queue(struct nvme_queue *nvmeq) 1311static void nvme_free_queue(struct nvme_queue *nvmeq)
@@ -1233,7 +1358,6 @@ static void nvme_clear_queue(struct nvme_queue *nvmeq)
1233 struct blk_mq_hw_ctx *hctx = nvmeq->hctx; 1358 struct blk_mq_hw_ctx *hctx = nvmeq->hctx;
1234 1359
1235 spin_lock_irq(&nvmeq->q_lock); 1360 spin_lock_irq(&nvmeq->q_lock);
1236 nvme_process_cq(nvmeq);
1237 if (hctx && hctx->tags) 1361 if (hctx && hctx->tags)
1238 blk_mq_tag_busy_iter(hctx, nvme_cancel_queue_ios, nvmeq); 1362 blk_mq_tag_busy_iter(hctx, nvme_cancel_queue_ios, nvmeq);
1239 spin_unlock_irq(&nvmeq->q_lock); 1363 spin_unlock_irq(&nvmeq->q_lock);
@@ -1256,7 +1380,10 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
1256 } 1380 }
1257 if (!qid && dev->admin_q) 1381 if (!qid && dev->admin_q)
1258 blk_mq_freeze_queue_start(dev->admin_q); 1382 blk_mq_freeze_queue_start(dev->admin_q);
1259 nvme_clear_queue(nvmeq); 1383
1384 spin_lock_irq(&nvmeq->q_lock);
1385 nvme_process_cq(nvmeq);
1386 spin_unlock_irq(&nvmeq->q_lock);
1260} 1387}
1261 1388
1262static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, 1389static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
@@ -1875,13 +2002,24 @@ static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
1875 return 0; 2002 return 0;
1876} 2003}
1877 2004
2005static void nvme_config_discard(struct nvme_ns *ns)
2006{
2007 u32 logical_block_size = queue_logical_block_size(ns->queue);
2008 ns->queue->limits.discard_zeroes_data = 0;
2009 ns->queue->limits.discard_alignment = logical_block_size;
2010 ns->queue->limits.discard_granularity = logical_block_size;
2011 ns->queue->limits.max_discard_sectors = 0xffffffff;
2012 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
2013}
2014
1878static int nvme_revalidate_disk(struct gendisk *disk) 2015static int nvme_revalidate_disk(struct gendisk *disk)
1879{ 2016{
1880 struct nvme_ns *ns = disk->private_data; 2017 struct nvme_ns *ns = disk->private_data;
1881 struct nvme_dev *dev = ns->dev; 2018 struct nvme_dev *dev = ns->dev;
1882 struct nvme_id_ns *id; 2019 struct nvme_id_ns *id;
1883 dma_addr_t dma_addr; 2020 dma_addr_t dma_addr;
1884 int lbaf; 2021 int lbaf, pi_type, old_ms;
2022 unsigned short bs;
1885 2023
1886 id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr, 2024 id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
1887 GFP_KERNEL); 2025 GFP_KERNEL);
@@ -1890,16 +2028,51 @@ static int nvme_revalidate_disk(struct gendisk *disk)
1890 __func__); 2028 __func__);
1891 return 0; 2029 return 0;
1892 } 2030 }
2031 if (nvme_identify(dev, ns->ns_id, 0, dma_addr)) {
2032 dev_warn(&dev->pci_dev->dev,
2033 "identify failed ns:%d, setting capacity to 0\n",
2034 ns->ns_id);
2035 memset(id, 0, sizeof(*id));
2036 }
1893 2037
1894 if (nvme_identify(dev, ns->ns_id, 0, dma_addr)) 2038 old_ms = ns->ms;
1895 goto free; 2039 lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
1896
1897 lbaf = id->flbas & 0xf;
1898 ns->lba_shift = id->lbaf[lbaf].ds; 2040 ns->lba_shift = id->lbaf[lbaf].ds;
2041 ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
2042
2043 /*
2044 * If identify namespace failed, use the default 512-byte block size so
2045 * the block layer can operate before read/write fails for 0 capacity.
2046 */
2047 if (ns->lba_shift == 0)
2048 ns->lba_shift = 9;
2049 bs = 1 << ns->lba_shift;
2050
2051 /* XXX: PI implementation requires metadata equal to the t10 pi tuple size */
2052 pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
2053 id->dps & NVME_NS_DPS_PI_MASK : 0;
2054
2055 if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
2056 ns->ms != old_ms ||
2057 bs != queue_logical_block_size(disk->queue) ||
2058 (ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT)))
2059 blk_integrity_unregister(disk);
2060
2061 ns->pi_type = pi_type;
2062 blk_queue_logical_block_size(ns->queue, bs);
2063
2064 if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) &&
2065 !(id->flbas & NVME_NS_FLBAS_META_EXT))
2066 nvme_init_integrity(ns);
2067
2068 if (id->ncap == 0 || (ns->ms && !blk_get_integrity(disk)))
2069 set_capacity(disk, 0);
2070 else
2071 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
2072
2073 if (dev->oncs & NVME_CTRL_ONCS_DSM)
2074 nvme_config_discard(ns);
1899 2075
1900 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
1901 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
1902 free:
1903 dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr); 2076 dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
1904 return 0; 2077 return 0;
1905} 2078}
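The format derivation above boils down to a few fields of the Identify Namespace payload. A user-space sketch of just the arithmetic, with the struct reduced to the fields involved; the 0xf LBA-format mask follows the hunk, and the PI mask is assumed to be the low bits of the DPS field:

#include <stdio.h>

#define FLBAS_LBA_MASK	0xf	/* NVME_NS_FLBAS_LBA_MASK */
#define DPS_PI_MASK	0x7	/* assumed value of NVME_NS_DPS_PI_MASK */
#define PI_TUPLE_SIZE	8	/* sizeof(struct t10_pi_tuple) */

struct lbaf { unsigned ms; unsigned ds; };

/* Derive block size, metadata size and PI type the way
 * nvme_revalidate_disk() does above. */
static void derive(const struct lbaf *lbafs, unsigned flbas, unsigned dps)
{
	const struct lbaf *f = &lbafs[flbas & FLBAS_LBA_MASK];
	unsigned lba_shift = f->ds ? f->ds : 9;	/* fall back to 512B */
	unsigned bs = 1u << lba_shift;
	unsigned ms = f->ms;
	unsigned pi_type = (ms == PI_TUPLE_SIZE) ? (dps & DPS_PI_MASK) : 0;

	printf("bs=%u ms=%u pi_type=%u\n", bs, ms, pi_type);
}

int main(void)
{
	struct lbaf lbafs[2] = { { 0, 9 }, { 8, 12 } };

	derive(lbafs, 1, 1);	/* 4K blocks, 8B metadata, PI type 1 */
	return 0;
}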
@@ -1923,8 +2096,7 @@ static int nvme_kthread(void *data)
1923 spin_lock(&dev_list_lock); 2096 spin_lock(&dev_list_lock);
1924 list_for_each_entry_safe(dev, next, &dev_list, node) { 2097 list_for_each_entry_safe(dev, next, &dev_list, node) {
1925 int i; 2098 int i;
1926 if (readl(&dev->bar->csts) & NVME_CSTS_CFS && 2099 if (readl(&dev->bar->csts) & NVME_CSTS_CFS) {
1927 dev->initialized) {
1928 if (work_busy(&dev->reset_work)) 2100 if (work_busy(&dev->reset_work))
1929 continue; 2101 continue;
1930 list_del_init(&dev->node); 2102 list_del_init(&dev->node);
@@ -1956,30 +2128,16 @@ static int nvme_kthread(void *data)
1956 return 0; 2128 return 0;
1957} 2129}
1958 2130
1959static void nvme_config_discard(struct nvme_ns *ns) 2131static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
1960{
1961 u32 logical_block_size = queue_logical_block_size(ns->queue);
1962 ns->queue->limits.discard_zeroes_data = 0;
1963 ns->queue->limits.discard_alignment = logical_block_size;
1964 ns->queue->limits.discard_granularity = logical_block_size;
1965 ns->queue->limits.max_discard_sectors = 0xffffffff;
1966 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
1967}
1968
1969static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
1970 struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
1971{ 2132{
1972 struct nvme_ns *ns; 2133 struct nvme_ns *ns;
1973 struct gendisk *disk; 2134 struct gendisk *disk;
1974 int node = dev_to_node(&dev->pci_dev->dev); 2135 int node = dev_to_node(&dev->pci_dev->dev);
1975 int lbaf;
1976
1977 if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
1978 return NULL;
1979 2136
1980 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); 2137 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
1981 if (!ns) 2138 if (!ns)
1982 return NULL; 2139 return;
2140
1983 ns->queue = blk_mq_init_queue(&dev->tagset); 2141 ns->queue = blk_mq_init_queue(&dev->tagset);
1984 if (IS_ERR(ns->queue)) 2142 if (IS_ERR(ns->queue))
1985 goto out_free_ns; 2143 goto out_free_ns;
@@ -1995,9 +2153,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
1995 2153
1996 ns->ns_id = nsid; 2154 ns->ns_id = nsid;
1997 ns->disk = disk; 2155 ns->disk = disk;
1998 lbaf = id->flbas & 0xf; 2156 ns->lba_shift = 9; /* default to 512-byte sectors until the disk is validated */
1999 ns->lba_shift = id->lbaf[lbaf].ds; 2157 list_add_tail(&ns->list, &dev->namespaces);
2000 ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); 2158
2001 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); 2159 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
2002 if (dev->max_hw_sectors) 2160 if (dev->max_hw_sectors)
2003 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); 2161 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
@@ -2011,21 +2169,26 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
2011 disk->fops = &nvme_fops; 2169 disk->fops = &nvme_fops;
2012 disk->private_data = ns; 2170 disk->private_data = ns;
2013 disk->queue = ns->queue; 2171 disk->queue = ns->queue;
2014 disk->driverfs_dev = &dev->pci_dev->dev; 2172 disk->driverfs_dev = dev->device;
2015 disk->flags = GENHD_FL_EXT_DEVT; 2173 disk->flags = GENHD_FL_EXT_DEVT;
2016 sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid); 2174 sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
2017 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
2018
2019 if (dev->oncs & NVME_CTRL_ONCS_DSM)
2020 nvme_config_discard(ns);
2021
2022 return ns;
2023 2175
2176 /*
2177 * Initialize capacity to 0 until we establish the namespace format and
2178 * set up integrity extensions if necessary. The revalidate_disk after
2179 * add_disk allows the driver to register with integrity if the format
2180 * requires it.
2181 */
2182 set_capacity(disk, 0);
2183 nvme_revalidate_disk(ns->disk);
2184 add_disk(ns->disk);
2185 if (ns->ms)
2186 revalidate_disk(ns->disk);
2187 return;
2024 out_free_queue: 2188 out_free_queue:
2025 blk_cleanup_queue(ns->queue); 2189 blk_cleanup_queue(ns->queue);
2026 out_free_ns: 2190 out_free_ns:
2027 kfree(ns); 2191 kfree(ns);
2028 return NULL;
2029} 2192}
2030 2193
2031static void nvme_create_io_queues(struct nvme_dev *dev) 2194static void nvme_create_io_queues(struct nvme_dev *dev)
@@ -2150,22 +2313,20 @@ static int nvme_dev_add(struct nvme_dev *dev)
2150 struct pci_dev *pdev = dev->pci_dev; 2313 struct pci_dev *pdev = dev->pci_dev;
2151 int res; 2314 int res;
2152 unsigned nn, i; 2315 unsigned nn, i;
2153 struct nvme_ns *ns;
2154 struct nvme_id_ctrl *ctrl; 2316 struct nvme_id_ctrl *ctrl;
2155 struct nvme_id_ns *id_ns;
2156 void *mem; 2317 void *mem;
2157 dma_addr_t dma_addr; 2318 dma_addr_t dma_addr;
2158 int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12; 2319 int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
2159 2320
2160 mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL); 2321 mem = dma_alloc_coherent(&pdev->dev, 4096, &dma_addr, GFP_KERNEL);
2161 if (!mem) 2322 if (!mem)
2162 return -ENOMEM; 2323 return -ENOMEM;
2163 2324
2164 res = nvme_identify(dev, 0, 1, dma_addr); 2325 res = nvme_identify(dev, 0, 1, dma_addr);
2165 if (res) { 2326 if (res) {
2166 dev_err(&pdev->dev, "Identify Controller failed (%d)\n", res); 2327 dev_err(&pdev->dev, "Identify Controller failed (%d)\n", res);
2167 res = -EIO; 2328 dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
2168 goto out; 2329 return -EIO;
2169 } 2330 }
2170 2331
2171 ctrl = mem; 2332 ctrl = mem;
@@ -2191,6 +2352,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
2191 } else 2352 } else
2192 dev->max_hw_sectors = max_hw_sectors; 2353 dev->max_hw_sectors = max_hw_sectors;
2193 } 2354 }
2355 dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
2194 2356
2195 dev->tagset.ops = &nvme_mq_ops; 2357 dev->tagset.ops = &nvme_mq_ops;
2196 dev->tagset.nr_hw_queues = dev->online_queues - 1; 2358 dev->tagset.nr_hw_queues = dev->online_queues - 1;
@@ -2203,33 +2365,12 @@ static int nvme_dev_add(struct nvme_dev *dev)
2203 dev->tagset.driver_data = dev; 2365 dev->tagset.driver_data = dev;
2204 2366
2205 if (blk_mq_alloc_tag_set(&dev->tagset)) 2367 if (blk_mq_alloc_tag_set(&dev->tagset))
2206 goto out; 2368 return 0;
2207
2208 id_ns = mem;
2209 for (i = 1; i <= nn; i++) {
2210 res = nvme_identify(dev, i, 0, dma_addr);
2211 if (res)
2212 continue;
2213
2214 if (id_ns->ncap == 0)
2215 continue;
2216
2217 res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
2218 dma_addr + 4096, NULL);
2219 if (res)
2220 memset(mem + 4096, 0, 4096);
2221 2369
2222 ns = nvme_alloc_ns(dev, i, mem, mem + 4096); 2370 for (i = 1; i <= nn; i++)
2223 if (ns) 2371 nvme_alloc_ns(dev, i);
2224 list_add_tail(&ns->list, &dev->namespaces);
2225 }
2226 list_for_each_entry(ns, &dev->namespaces, list)
2227 add_disk(ns->disk);
2228 res = 0;
2229 2372
2230 out: 2373 return 0;
2231 dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
2232 return res;
2233} 2374}
2234 2375
2235static int nvme_dev_map(struct nvme_dev *dev) 2376static int nvme_dev_map(struct nvme_dev *dev)
@@ -2358,8 +2499,6 @@ static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
2358static void nvme_del_queue_end(struct nvme_queue *nvmeq) 2499static void nvme_del_queue_end(struct nvme_queue *nvmeq)
2359{ 2500{
2360 struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; 2501 struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
2361
2362 nvme_clear_queue(nvmeq);
2363 nvme_put_dq(dq); 2502 nvme_put_dq(dq);
2364} 2503}
2365 2504
@@ -2502,7 +2641,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
2502 int i; 2641 int i;
2503 u32 csts = -1; 2642 u32 csts = -1;
2504 2643
2505 dev->initialized = 0;
2506 nvme_dev_list_remove(dev); 2644 nvme_dev_list_remove(dev);
2507 2645
2508 if (dev->bar) { 2646 if (dev->bar) {
@@ -2513,7 +2651,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
2513 for (i = dev->queue_count - 1; i >= 0; i--) { 2651 for (i = dev->queue_count - 1; i >= 0; i--) {
2514 struct nvme_queue *nvmeq = dev->queues[i]; 2652 struct nvme_queue *nvmeq = dev->queues[i];
2515 nvme_suspend_queue(nvmeq); 2653 nvme_suspend_queue(nvmeq);
2516 nvme_clear_queue(nvmeq);
2517 } 2654 }
2518 } else { 2655 } else {
2519 nvme_disable_io_queues(dev); 2656 nvme_disable_io_queues(dev);
@@ -2521,6 +2658,9 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
2521 nvme_disable_queue(dev, 0); 2658 nvme_disable_queue(dev, 0);
2522 } 2659 }
2523 nvme_dev_unmap(dev); 2660 nvme_dev_unmap(dev);
2661
2662 for (i = dev->queue_count - 1; i >= 0; i--)
2663 nvme_clear_queue(dev->queues[i]);
2524} 2664}
2525 2665
2526static void nvme_dev_remove(struct nvme_dev *dev) 2666static void nvme_dev_remove(struct nvme_dev *dev)
@@ -2528,8 +2668,11 @@ static void nvme_dev_remove(struct nvme_dev *dev)
2528 struct nvme_ns *ns; 2668 struct nvme_ns *ns;
2529 2669
2530 list_for_each_entry(ns, &dev->namespaces, list) { 2670 list_for_each_entry(ns, &dev->namespaces, list) {
2531 if (ns->disk->flags & GENHD_FL_UP) 2671 if (ns->disk->flags & GENHD_FL_UP) {
2672 if (blk_get_integrity(ns->disk))
2673 blk_integrity_unregister(ns->disk);
2532 del_gendisk(ns->disk); 2674 del_gendisk(ns->disk);
2675 }
2533 if (!blk_queue_dying(ns->queue)) { 2676 if (!blk_queue_dying(ns->queue)) {
2534 blk_mq_abort_requeue_list(ns->queue); 2677 blk_mq_abort_requeue_list(ns->queue);
2535 blk_cleanup_queue(ns->queue); 2678 blk_cleanup_queue(ns->queue);
@@ -2611,6 +2754,7 @@ static void nvme_free_dev(struct kref *kref)
2611 struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref); 2754 struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
2612 2755
2613 pci_dev_put(dev->pci_dev); 2756 pci_dev_put(dev->pci_dev);
2757 put_device(dev->device);
2614 nvme_free_namespaces(dev); 2758 nvme_free_namespaces(dev);
2615 nvme_release_instance(dev); 2759 nvme_release_instance(dev);
2616 blk_mq_free_tag_set(&dev->tagset); 2760 blk_mq_free_tag_set(&dev->tagset);
@@ -2622,11 +2766,27 @@ static void nvme_free_dev(struct kref *kref)
2622 2766
2623static int nvme_dev_open(struct inode *inode, struct file *f) 2767static int nvme_dev_open(struct inode *inode, struct file *f)
2624{ 2768{
2625 struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev, 2769 struct nvme_dev *dev;
2626 miscdev); 2770 int instance = iminor(inode);
2627 kref_get(&dev->kref); 2771 int ret = -ENODEV;
2628 f->private_data = dev; 2772
2629 return 0; 2773 spin_lock(&dev_list_lock);
2774 list_for_each_entry(dev, &dev_list, node) {
2775 if (dev->instance == instance) {
2776 if (!dev->admin_q) {
2777 ret = -EWOULDBLOCK;
2778 break;
2779 }
2780 if (!kref_get_unless_zero(&dev->kref))
2781 break;
2782 f->private_data = dev;
2783 ret = 0;
2784 break;
2785 }
2786 }
2787 spin_unlock(&dev_list_lock);
2788
2789 return ret;
2630} 2790}
2631 2791
2632static int nvme_dev_release(struct inode *inode, struct file *f) 2792static int nvme_dev_release(struct inode *inode, struct file *f)
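kref_get_unless_zero() is the piece that makes the open-by-instance walk above safe: a device whose last reference is already being dropped is skipped rather than resurrected. An illustrative user-space analog of that acquire-if-live operation with C11 atomics (this is the idea, not the kref implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Increment the counter only if it is still nonzero; returns false when
 * the object is already on its way to being freed. */
static bool get_unless_zero(atomic_uint *refs)
{
	unsigned old = atomic_load(refs);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refs, &old, old + 1))
			return true;
		/* 'old' was reloaded by the failed CAS; retry. */
	}
	return false;
}

int main(void)
{
	atomic_uint live = 1, dying = 0;

	printf("%d %d\n", get_unless_zero(&live), get_unless_zero(&dying));
	return 0;
}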
@@ -2768,7 +2928,6 @@ static int nvme_dev_resume(struct nvme_dev *dev)
2768 nvme_unfreeze_queues(dev); 2928 nvme_unfreeze_queues(dev);
2769 nvme_set_irq_hints(dev); 2929 nvme_set_irq_hints(dev);
2770 } 2930 }
2771 dev->initialized = 1;
2772 return 0; 2931 return 0;
2773} 2932}
2774 2933
@@ -2799,6 +2958,7 @@ static void nvme_reset_workfn(struct work_struct *work)
2799 dev->reset_workfn(work); 2958 dev->reset_workfn(work);
2800} 2959}
2801 2960
2961static void nvme_async_probe(struct work_struct *work);
2802static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2962static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2803{ 2963{
2804 int node, result = -ENOMEM; 2964 int node, result = -ENOMEM;
@@ -2834,37 +2994,20 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2834 goto release; 2994 goto release;
2835 2995
2836 kref_init(&dev->kref); 2996 kref_init(&dev->kref);
2837 result = nvme_dev_start(dev); 2997 dev->device = device_create(nvme_class, &pdev->dev,
2838 if (result) 2998 MKDEV(nvme_char_major, dev->instance),
2999 dev, "nvme%d", dev->instance);
3000 if (IS_ERR(dev->device)) {
3001 result = PTR_ERR(dev->device);
2839 goto release_pools; 3002 goto release_pools;
3003 }
3004 get_device(dev->device);
2840 3005
2841 if (dev->online_queues > 1) 3006 INIT_WORK(&dev->probe_work, nvme_async_probe);
2842 result = nvme_dev_add(dev); 3007 schedule_work(&dev->probe_work);
2843 if (result)
2844 goto shutdown;
2845
2846 scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
2847 dev->miscdev.minor = MISC_DYNAMIC_MINOR;
2848 dev->miscdev.parent = &pdev->dev;
2849 dev->miscdev.name = dev->name;
2850 dev->miscdev.fops = &nvme_dev_fops;
2851 result = misc_register(&dev->miscdev);
2852 if (result)
2853 goto remove;
2854
2855 nvme_set_irq_hints(dev);
2856
2857 dev->initialized = 1;
2858 return 0; 3008 return 0;
2859 3009
2860 remove:
2861 nvme_dev_remove(dev);
2862 nvme_dev_remove_admin(dev);
2863 nvme_free_namespaces(dev);
2864 shutdown:
2865 nvme_dev_shutdown(dev);
2866 release_pools: 3010 release_pools:
2867 nvme_free_queues(dev, 0);
2868 nvme_release_prp_pools(dev); 3011 nvme_release_prp_pools(dev);
2869 release: 3012 release:
2870 nvme_release_instance(dev); 3013 nvme_release_instance(dev);
@@ -2877,6 +3020,29 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2877 return result; 3020 return result;
2878} 3021}
2879 3022
3023static void nvme_async_probe(struct work_struct *work)
3024{
3025 struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
3026 int result;
3027
3028 result = nvme_dev_start(dev);
3029 if (result)
3030 goto reset;
3031
3032 if (dev->online_queues > 1)
3033 result = nvme_dev_add(dev);
3034 if (result)
3035 goto reset;
3036
3037 nvme_set_irq_hints(dev);
3038 return;
3039 reset:
3040 if (!work_busy(&dev->reset_work)) {
3041 dev->reset_workfn = nvme_reset_failed_dev;
3042 queue_work(nvme_workq, &dev->reset_work);
3043 }
3044}
3045
2880static void nvme_reset_notify(struct pci_dev *pdev, bool prepare) 3046static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
2881{ 3047{
2882 struct nvme_dev *dev = pci_get_drvdata(pdev); 3048 struct nvme_dev *dev = pci_get_drvdata(pdev);
@@ -2902,11 +3068,12 @@ static void nvme_remove(struct pci_dev *pdev)
2902 spin_unlock(&dev_list_lock); 3068 spin_unlock(&dev_list_lock);
2903 3069
2904 pci_set_drvdata(pdev, NULL); 3070 pci_set_drvdata(pdev, NULL);
3071 flush_work(&dev->probe_work);
2905 flush_work(&dev->reset_work); 3072 flush_work(&dev->reset_work);
2906 misc_deregister(&dev->miscdev);
2907 nvme_dev_shutdown(dev); 3073 nvme_dev_shutdown(dev);
2908 nvme_dev_remove(dev); 3074 nvme_dev_remove(dev);
2909 nvme_dev_remove_admin(dev); 3075 nvme_dev_remove_admin(dev);
3076 device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
2910 nvme_free_queues(dev, 0); 3077 nvme_free_queues(dev, 0);
2911 nvme_release_prp_pools(dev); 3078 nvme_release_prp_pools(dev);
2912 kref_put(&dev->kref, nvme_free_dev); 3079 kref_put(&dev->kref, nvme_free_dev);
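The probe/remove pairing introduced here is easy to state on its own: probe only schedules the slow controller bring-up, and remove flushes that work before tearing anything down. A reduced kernel-style sketch using the driver's own names:

/* probe: defer the slow bring-up and return quickly */
static int probe(struct nvme_dev *dev)
{
	INIT_WORK(&dev->probe_work, nvme_async_probe);
	schedule_work(&dev->probe_work);
	return 0;
}

/* remove: guarantee the async probe has finished (or never ran) */
static void remove(struct nvme_dev *dev)
{
	flush_work(&dev->probe_work);
	flush_work(&dev->reset_work);
	/* ...shutdown and free as in nvme_remove()... */
}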
@@ -2990,11 +3157,26 @@ static int __init nvme_init(void)
2990 else if (result > 0) 3157 else if (result > 0)
2991 nvme_major = result; 3158 nvme_major = result;
2992 3159
3160 result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
3161 &nvme_dev_fops);
3162 if (result < 0)
3163 goto unregister_blkdev;
3164 else if (result > 0)
3165 nvme_char_major = result;
3166
3167 nvme_class = class_create(THIS_MODULE, "nvme");
3168 if (!nvme_class)
3169 goto unregister_chrdev;
3170
2993 result = pci_register_driver(&nvme_driver); 3171 result = pci_register_driver(&nvme_driver);
2994 if (result) 3172 if (result)
2995 goto unregister_blkdev; 3173 goto destroy_class;
2996 return 0; 3174 return 0;
2997 3175
3176 destroy_class:
3177 class_destroy(nvme_class);
3178 unregister_chrdev:
3179 __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
2998 unregister_blkdev: 3180 unregister_blkdev:
2999 unregister_blkdev(nvme_major, "nvme"); 3181 unregister_blkdev(nvme_major, "nvme");
3000 kill_workq: 3182 kill_workq:
@@ -3005,9 +3187,10 @@ static int __init nvme_init(void)
3005static void __exit nvme_exit(void) 3187static void __exit nvme_exit(void)
3006{ 3188{
3007 pci_unregister_driver(&nvme_driver); 3189 pci_unregister_driver(&nvme_driver);
3008 unregister_hotcpu_notifier(&nvme_nb);
3009 unregister_blkdev(nvme_major, "nvme"); 3190 unregister_blkdev(nvme_major, "nvme");
3010 destroy_workqueue(nvme_workq); 3191 destroy_workqueue(nvme_workq);
3192 class_destroy(nvme_class);
3193 __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
3011 BUG_ON(nvme_thread && !IS_ERR(nvme_thread)); 3194 BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
3012 _nvme_check_size(); 3195 _nvme_check_size();
3013} 3196}
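The module init above grows the usual reverse-order unwind ladder: every acquired resource gets a label, and a failure jumps to the label that releases everything obtained so far. The shape, stripped to hypothetical user-space resources:

#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *what, int fail)
{
	if (fail)
		return NULL;
	printf("acquired %s\n", what);
	return malloc(1);
}

static int init(void)
{
	void *a, *b, *c;
	int err = -1;

	a = acquire("blkdev major", 0);
	if (!a)
		goto out;
	b = acquire("chrdev major", 0);
	if (!b)
		goto put_a;
	c = acquire("device class", 1);	/* simulate failure */
	if (!c)
		goto put_b;
	return 0;	/* success: resources stay held */

	/* unwind strictly in reverse order of acquisition */
put_b:
	free(b);
put_a:
	free(a);
out:
	return err;
}

int main(void)
{
	return init() ? 1 : 0;
}

One nit when reusing the shape: class_create() reports failure with an ERR_PTR rather than NULL, so IS_ERR()/PTR_ERR() is the robust form of that check.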
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 5e78568026c3..e10196e0182d 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -779,10 +779,8 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
779 struct nvme_dev *dev = ns->dev; 779 struct nvme_dev *dev = ns->dev;
780 dma_addr_t dma_addr; 780 dma_addr_t dma_addr;
781 void *mem; 781 void *mem;
782 struct nvme_id_ctrl *id_ctrl;
783 int res = SNTI_TRANSLATION_SUCCESS; 782 int res = SNTI_TRANSLATION_SUCCESS;
784 int nvme_sc; 783 int nvme_sc;
785 u8 ieee[4];
786 int xfer_len; 784 int xfer_len;
787 __be32 tmp_id = cpu_to_be32(ns->ns_id); 785 __be32 tmp_id = cpu_to_be32(ns->ns_id);
788 786
@@ -793,46 +791,60 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
793 goto out_dma; 791 goto out_dma;
794 } 792 }
795 793
796 /* nvme controller identify */ 794 memset(inq_response, 0, alloc_len);
797 nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
798 res = nvme_trans_status_code(hdr, nvme_sc);
799 if (res)
800 goto out_free;
801 if (nvme_sc) {
802 res = nvme_sc;
803 goto out_free;
804 }
805 id_ctrl = mem;
806
807 /* Since SCSI tried to save 4 bits... [SPC-4(r34) Table 591] */
808 ieee[0] = id_ctrl->ieee[0] << 4;
809 ieee[1] = id_ctrl->ieee[0] >> 4 | id_ctrl->ieee[1] << 4;
810 ieee[2] = id_ctrl->ieee[1] >> 4 | id_ctrl->ieee[2] << 4;
811 ieee[3] = id_ctrl->ieee[2] >> 4;
812
813 memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
814 inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE; /* Page Code */ 795 inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE; /* Page Code */
815 inq_response[3] = 20; /* Page Length */ 796 if (readl(&dev->bar->vs) >= NVME_VS(1, 1)) {
816 /* Designation Descriptor start */ 797 struct nvme_id_ns *id_ns = mem;
817 inq_response[4] = 0x01; /* Proto ID=0h | Code set=1h */ 798 void *eui = id_ns->eui64;
818 inq_response[5] = 0x03; /* PIV=0b | Asso=00b | Designator Type=3h */ 799 int len = sizeof(id_ns->eui64);
819 inq_response[6] = 0x00; /* Rsvd */
820 inq_response[7] = 16; /* Designator Length */
821 /* Designator start */
822 inq_response[8] = 0x60 | ieee[3]; /* NAA=6h | IEEE ID MSB, High nibble*/
823 inq_response[9] = ieee[2]; /* IEEE ID */
824 inq_response[10] = ieee[1]; /* IEEE ID */
825 inq_response[11] = ieee[0]; /* IEEE ID| Vendor Specific ID... */
826 inq_response[12] = (dev->pci_dev->vendor & 0xFF00) >> 8;
827 inq_response[13] = (dev->pci_dev->vendor & 0x00FF);
828 inq_response[14] = dev->serial[0];
829 inq_response[15] = dev->serial[1];
830 inq_response[16] = dev->model[0];
831 inq_response[17] = dev->model[1];
832 memcpy(&inq_response[18], &tmp_id, sizeof(u32));
833 /* Last 2 bytes are zero */
834 800
835 xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); 801 nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
802 res = nvme_trans_status_code(hdr, nvme_sc);
803 if (res)
804 goto out_free;
805 if (nvme_sc) {
806 res = nvme_sc;
807 goto out_free;
808 }
809
810 if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) {
811 if (bitmap_empty(eui, len * 8)) {
812 eui = id_ns->nguid;
813 len = sizeof(id_ns->nguid);
814 }
815 }
816 if (bitmap_empty(eui, len * 8))
817 goto scsi_string;
818
819 inq_response[3] = 4 + len; /* Page Length */
820 /* Designation Descriptor start */
821 inq_response[4] = 0x01; /* Proto ID=0h | Code set=1h */
822 inq_response[5] = 0x02; /* PIV=0b | Asso=00b | Designator Type=2h */
823 inq_response[6] = 0x00; /* Rsvd */
824 inq_response[7] = len; /* Designator Length */
825 memcpy(&inq_response[8], eui, len);
826 } else {
827 scsi_string:
828 if (alloc_len < 72) {
829 res = nvme_trans_completion(hdr,
830 SAM_STAT_CHECK_CONDITION,
831 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
832 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
833 goto out_free;
834 }
835 inq_response[3] = 0x48; /* Page Length */
836 /* Designation Descriptor start */
837 inq_response[4] = 0x03; /* Proto ID=0h | Code set=3h */
838 inq_response[5] = 0x08; /* PIV=0b | Asso=00b | Designator Type=8h */
839 inq_response[6] = 0x00; /* Rsvd */
840 inq_response[7] = 0x44; /* Designator Length */
841
842 sprintf(&inq_response[8], "%04x", dev->pci_dev->vendor);
843 memcpy(&inq_response[12], dev->model, sizeof(dev->model));
844 sprintf(&inq_response[52], "%04x", tmp_id);
845 memcpy(&inq_response[56], dev->serial, sizeof(dev->serial));
846 }
847 xfer_len = alloc_len;
836 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); 848 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
837 849
838 out_free: 850 out_free:
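The descriptor built in the EUI-64 branch above is a fixed header followed by the identifier bytes. A user-space sketch of that packing, with the header bytes copied from the hunk; 0x83 is assumed as the value behind INQ_DEVICE_IDENTIFICATION_PAGE (it is the standard SPC Device Identification VPD page code):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Fill a Device Identification VPD page with one EUI-64 designator. */
static size_t build_eui_page(uint8_t *buf, size_t bufsz,
			     const uint8_t *eui, uint8_t len)
{
	if (bufsz < (size_t)(8 + len))
		return 0;
	memset(buf, 0, 8 + len);
	buf[1] = 0x83;		/* Page Code: device identification */
	buf[3] = 4 + len;	/* Page Length */
	buf[4] = 0x01;		/* Proto ID=0h | Code set=1h (binary) */
	buf[5] = 0x02;		/* PIV=0b | Asso=00b | Designator Type=2h */
	buf[7] = len;		/* Designator Length */
	memcpy(&buf[8], eui, len);
	return 8 + len;
}

int main(void)
{
	uint8_t eui[8] = { 0x00, 0x02, 0xc9, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t page[72];
	size_t n = build_eui_page(page, sizeof(page), eui, sizeof(eui));

	for (size_t i = 0; i < n; i++)
		printf("%02x ", page[i]);
	printf("\n");
	return 0;
}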
@@ -1600,7 +1612,7 @@ static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
1600 /* 10 Byte CDB */ 1612 /* 10 Byte CDB */
1601 *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) + 1613 *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
1602 parm_list[MODE_SELECT_10_BD_OFFSET + 1]; 1614 parm_list[MODE_SELECT_10_BD_OFFSET + 1];
1603 *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] && 1615 *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &
1604 MODE_SELECT_10_LLBAA_MASK; 1616 MODE_SELECT_10_LLBAA_MASK;
1605 } else { 1617 } else {
1606 /* 6 Byte CDB */ 1618 /* 6 Byte CDB */
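The one-character change above is the whole bug: with a flag mask, logical '&&' collapses any nonzero byte to 1, while bitwise '&' actually isolates the flag bit. A two-line demonstration (the 0x10 mask value is illustrative):

#include <stdio.h>

#define LLBAA_MASK 0x10	/* illustrative flag bit */

int main(void)
{
	unsigned char byte = 0x01;	/* flag NOT set */

	printf("&&: %d\n", byte && LLBAA_MASK);	/* 1 -- wrong */
	printf(" &: %d\n", byte & LLBAA_MASK);	/* 0 -- correct */
	return 0;
}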
@@ -2222,7 +2234,7 @@ static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2222 page_code = GET_INQ_PAGE_CODE(cmd); 2234 page_code = GET_INQ_PAGE_CODE(cmd);
2223 alloc_len = GET_INQ_ALLOC_LENGTH(cmd); 2235 alloc_len = GET_INQ_ALLOC_LENGTH(cmd);
2224 2236
2225 inq_response = kmalloc(STANDARD_INQUIRY_LENGTH, GFP_KERNEL); 2237 inq_response = kmalloc(alloc_len, GFP_KERNEL);
2226 if (inq_response == NULL) { 2238 if (inq_response == NULL) {
2227 res = -ENOMEM; 2239 res = -ENOMEM;
2228 goto out_mem; 2240 goto out_mem;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 8a86b62466f7..b40af3203089 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -38,6 +38,7 @@
38#include <linux/kernel.h> 38#include <linux/kernel.h>
39#include <linux/device.h> 39#include <linux/device.h>
40#include <linux/module.h> 40#include <linux/module.h>
41#include <linux/blk-mq.h>
41#include <linux/fs.h> 42#include <linux/fs.h>
42#include <linux/blkdev.h> 43#include <linux/blkdev.h>
43#include <linux/slab.h> 44#include <linux/slab.h>
@@ -340,9 +341,7 @@ struct rbd_device {
340 341
341 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */ 342 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
342 343
343 struct list_head rq_queue; /* incoming rq queue */
344 spinlock_t lock; /* queue, flags, open_count */ 344 spinlock_t lock; /* queue, flags, open_count */
345 struct work_struct rq_work;
346 345
347 struct rbd_image_header header; 346 struct rbd_image_header header;
348 unsigned long flags; /* possibly lock protected */ 347 unsigned long flags; /* possibly lock protected */
@@ -360,6 +359,9 @@ struct rbd_device {
360 atomic_t parent_ref; 359 atomic_t parent_ref;
361 struct rbd_device *parent; 360 struct rbd_device *parent;
362 361
362 /* Block layer tags. */
363 struct blk_mq_tag_set tag_set;
364
363 /* protects updating the header */ 365 /* protects updating the header */
364 struct rw_semaphore header_rwsem; 366 struct rw_semaphore header_rwsem;
365 367
@@ -1817,7 +1819,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1817 1819
1818 /* 1820 /*
1819 * We support a 64-bit length, but ultimately it has to be 1821 * We support a 64-bit length, but ultimately it has to be
1820 * passed to blk_end_request(), which takes an unsigned int. 1822 * passed to the block layer, which just supports a 32-bit
1823 * length field.
1821 */ 1824 */
1822 obj_request->xferred = osd_req->r_reply_op_len[0]; 1825 obj_request->xferred = osd_req->r_reply_op_len[0];
1823 rbd_assert(obj_request->xferred < (u64)UINT_MAX); 1826 rbd_assert(obj_request->xferred < (u64)UINT_MAX);
@@ -2275,7 +2278,10 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2275 more = obj_request->which < img_request->obj_request_count - 1; 2278 more = obj_request->which < img_request->obj_request_count - 1;
2276 } else { 2279 } else {
2277 rbd_assert(img_request->rq != NULL); 2280 rbd_assert(img_request->rq != NULL);
2278 more = blk_end_request(img_request->rq, result, xferred); 2281
2282 more = blk_update_request(img_request->rq, result, xferred);
2283 if (!more)
2284 __blk_mq_end_request(img_request->rq, result);
2279 } 2285 }
2280 2286
2281 return more; 2287 return more;
@@ -3304,8 +3310,10 @@ out:
3304 return ret; 3310 return ret;
3305} 3311}
3306 3312
3307static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq) 3313static void rbd_queue_workfn(struct work_struct *work)
3308{ 3314{
3315 struct request *rq = blk_mq_rq_from_pdu(work);
3316 struct rbd_device *rbd_dev = rq->q->queuedata;
3309 struct rbd_img_request *img_request; 3317 struct rbd_img_request *img_request;
3310 struct ceph_snap_context *snapc = NULL; 3318 struct ceph_snap_context *snapc = NULL;
3311 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT; 3319 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
@@ -3314,6 +3322,13 @@ static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq)
3314 u64 mapping_size; 3322 u64 mapping_size;
3315 int result; 3323 int result;
3316 3324
3325 if (rq->cmd_type != REQ_TYPE_FS) {
3326 dout("%s: non-fs request type %d\n", __func__,
3327 (int) rq->cmd_type);
3328 result = -EIO;
3329 goto err;
3330 }
3331
3317 if (rq->cmd_flags & REQ_DISCARD) 3332 if (rq->cmd_flags & REQ_DISCARD)
3318 op_type = OBJ_OP_DISCARD; 3333 op_type = OBJ_OP_DISCARD;
3319 else if (rq->cmd_flags & REQ_WRITE) 3334 else if (rq->cmd_flags & REQ_WRITE)
@@ -3359,6 +3374,8 @@ static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq)
3359 goto err_rq; /* Shouldn't happen */ 3374 goto err_rq; /* Shouldn't happen */
3360 } 3375 }
3361 3376
3377 blk_mq_start_request(rq);
3378
3362 down_read(&rbd_dev->header_rwsem); 3379 down_read(&rbd_dev->header_rwsem);
3363 mapping_size = rbd_dev->mapping.size; 3380 mapping_size = rbd_dev->mapping.size;
3364 if (op_type != OBJ_OP_READ) { 3381 if (op_type != OBJ_OP_READ) {
@@ -3404,53 +3421,18 @@ err_rq:
3404 rbd_warn(rbd_dev, "%s %llx at %llx result %d", 3421 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
3405 obj_op_name(op_type), length, offset, result); 3422 obj_op_name(op_type), length, offset, result);
3406 ceph_put_snap_context(snapc); 3423 ceph_put_snap_context(snapc);
3407 blk_end_request_all(rq, result); 3424err:
3425 blk_mq_end_request(rq, result);
3408} 3426}
3409 3427
3410static void rbd_request_workfn(struct work_struct *work) 3428static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
3429 const struct blk_mq_queue_data *bd)
3411{ 3430{
3412 struct rbd_device *rbd_dev = 3431 struct request *rq = bd->rq;
3413 container_of(work, struct rbd_device, rq_work); 3432 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3414 struct request *rq, *next;
3415 LIST_HEAD(requests);
3416
3417 spin_lock_irq(&rbd_dev->lock); /* rq->q->queue_lock */
3418 list_splice_init(&rbd_dev->rq_queue, &requests);
3419 spin_unlock_irq(&rbd_dev->lock);
3420 3433
3421 list_for_each_entry_safe(rq, next, &requests, queuelist) { 3434 queue_work(rbd_wq, work);
3422 list_del_init(&rq->queuelist); 3435 return BLK_MQ_RQ_QUEUE_OK;
3423 rbd_handle_request(rbd_dev, rq);
3424 }
3425}
3426
3427/*
3428 * Called with q->queue_lock held and interrupts disabled, possibly on
3429 * the way to schedule(). Do not sleep here!
3430 */
3431static void rbd_request_fn(struct request_queue *q)
3432{
3433 struct rbd_device *rbd_dev = q->queuedata;
3434 struct request *rq;
3435 int queued = 0;
3436
3437 rbd_assert(rbd_dev);
3438
3439 while ((rq = blk_fetch_request(q))) {
3440 /* Ignore any non-FS requests that filter through. */
3441 if (rq->cmd_type != REQ_TYPE_FS) {
3442 dout("%s: non-fs request type %d\n", __func__,
3443 (int) rq->cmd_type);
3444 __blk_end_request_all(rq, 0);
3445 continue;
3446 }
3447
3448 list_add_tail(&rq->queuelist, &rbd_dev->rq_queue);
3449 queued++;
3450 }
3451
3452 if (queued)
3453 queue_work(rbd_wq, &rbd_dev->rq_work);
3454} 3436}
3455 3437
3456/* 3438/*
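The blk-mq conversion above gives each request a struct work_struct in its PDU: queue_rq only bounces the request to a workqueue, and the work function recovers the request with blk_mq_rq_from_pdu(). The wiring, reduced to a kernel-style skeleton using the same APIs as the hunks here (system_wq stands in for the driver's dedicated rbd_wq):

/* Per-request PDU is just the work item; cmd_size in the tag_set
 * reserves room for it (sizeof(struct work_struct)). */
static void queue_workfn(struct work_struct *work)
{
	struct request *rq = blk_mq_rq_from_pdu(work);

	blk_mq_start_request(rq);
	/* ...issue the I/O, then blk_mq_end_request(rq, result)... */
}

static int init_request(void *data, struct request *rq, unsigned int hctx_idx,
			unsigned int request_idx, unsigned int numa_node)
{
	INIT_WORK(blk_mq_rq_to_pdu(rq), queue_workfn);
	return 0;
}

static int queue_rq(struct blk_mq_hw_ctx *hctx,
		    const struct blk_mq_queue_data *bd)
{
	queue_work(system_wq, blk_mq_rq_to_pdu(bd->rq));
	return BLK_MQ_RQ_QUEUE_OK;
}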
@@ -3511,6 +3493,7 @@ static void rbd_free_disk(struct rbd_device *rbd_dev)
3511 del_gendisk(disk); 3493 del_gendisk(disk);
3512 if (disk->queue) 3494 if (disk->queue)
3513 blk_cleanup_queue(disk->queue); 3495 blk_cleanup_queue(disk->queue);
3496 blk_mq_free_tag_set(&rbd_dev->tag_set);
3514 } 3497 }
3515 put_disk(disk); 3498 put_disk(disk);
3516} 3499}
@@ -3694,7 +3677,7 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3694 3677
3695 ret = rbd_dev_header_info(rbd_dev); 3678 ret = rbd_dev_header_info(rbd_dev);
3696 if (ret) 3679 if (ret)
3697 return ret; 3680 goto out;
3698 3681
3699 /* 3682 /*
3700 * If there is a parent, see if it has disappeared due to the 3683 * If there is a parent, see if it has disappeared due to the
@@ -3703,30 +3686,46 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3703 if (rbd_dev->parent) { 3686 if (rbd_dev->parent) {
3704 ret = rbd_dev_v2_parent_info(rbd_dev); 3687 ret = rbd_dev_v2_parent_info(rbd_dev);
3705 if (ret) 3688 if (ret)
3706 return ret; 3689 goto out;
3707 } 3690 }
3708 3691
3709 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) { 3692 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
3710 if (rbd_dev->mapping.size != rbd_dev->header.image_size) 3693 rbd_dev->mapping.size = rbd_dev->header.image_size;
3711 rbd_dev->mapping.size = rbd_dev->header.image_size;
3712 } else { 3694 } else {
3713 /* validate mapped snapshot's EXISTS flag */ 3695 /* validate mapped snapshot's EXISTS flag */
3714 rbd_exists_validate(rbd_dev); 3696 rbd_exists_validate(rbd_dev);
3715 } 3697 }
3716 3698
3699out:
3717 up_write(&rbd_dev->header_rwsem); 3700 up_write(&rbd_dev->header_rwsem);
3718 3701 if (!ret && mapping_size != rbd_dev->mapping.size)
3719 if (mapping_size != rbd_dev->mapping.size)
3720 rbd_dev_update_size(rbd_dev); 3702 rbd_dev_update_size(rbd_dev);
3721 3703
3704 return ret;
3705}
3706
3707static int rbd_init_request(void *data, struct request *rq,
3708 unsigned int hctx_idx, unsigned int request_idx,
3709 unsigned int numa_node)
3710{
3711 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3712
3713 INIT_WORK(work, rbd_queue_workfn);
3722 return 0; 3714 return 0;
3723} 3715}
3724 3716
3717static struct blk_mq_ops rbd_mq_ops = {
3718 .queue_rq = rbd_queue_rq,
3719 .map_queue = blk_mq_map_queue,
3720 .init_request = rbd_init_request,
3721};
3722
3725static int rbd_init_disk(struct rbd_device *rbd_dev) 3723static int rbd_init_disk(struct rbd_device *rbd_dev)
3726{ 3724{
3727 struct gendisk *disk; 3725 struct gendisk *disk;
3728 struct request_queue *q; 3726 struct request_queue *q;
3729 u64 segment_size; 3727 u64 segment_size;
3728 int err;
3730 3729
3731 /* create gendisk info */ 3730 /* create gendisk info */
3732 disk = alloc_disk(single_major ? 3731 disk = alloc_disk(single_major ?
@@ -3744,10 +3743,25 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
3744 disk->fops = &rbd_bd_ops; 3743 disk->fops = &rbd_bd_ops;
3745 disk->private_data = rbd_dev; 3744 disk->private_data = rbd_dev;
3746 3745
3747 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock); 3746 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
3748 if (!q) 3747 rbd_dev->tag_set.ops = &rbd_mq_ops;
3748 rbd_dev->tag_set.queue_depth = BLKDEV_MAX_RQ;
3749 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
3750 rbd_dev->tag_set.flags =
3751 BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
3752 rbd_dev->tag_set.nr_hw_queues = 1;
3753 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
3754
3755 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
3756 if (err)
3749 goto out_disk; 3757 goto out_disk;
3750 3758
3759 q = blk_mq_init_queue(&rbd_dev->tag_set);
3760 if (IS_ERR(q)) {
3761 err = PTR_ERR(q);
3762 goto out_tag_set;
3763 }
3764
3751 /* We use the default size, but let's be explicit about it. */ 3765 /* We use the default size, but let's be explicit about it. */
3752 blk_queue_physical_block_size(q, SECTOR_SIZE); 3766 blk_queue_physical_block_size(q, SECTOR_SIZE);
3753 3767
@@ -3773,10 +3787,11 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
3773 rbd_dev->disk = disk; 3787 rbd_dev->disk = disk;
3774 3788
3775 return 0; 3789 return 0;
3790out_tag_set:
3791 blk_mq_free_tag_set(&rbd_dev->tag_set);
3776out_disk: 3792out_disk:
3777 put_disk(disk); 3793 put_disk(disk);
3778 3794 return err;
3779 return -ENOMEM;
3780} 3795}
3781 3796
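The error unwinding above pairs with the free path added to rbd_free_disk() earlier in this patch: a blk-mq tag set must outlive its queue, so teardown order is blk_cleanup_queue() first, blk_mq_free_tag_set() second. A condensed sketch of the lifecycle, under the same 3.19-era API assumptions:

	struct blk_mq_tag_set set = {
		.ops		= &rbd_mq_ops,
		.queue_depth	= BLKDEV_MAX_RQ,
		.numa_node	= NUMA_NO_NODE,
		.flags		= BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE,
		.nr_hw_queues	= 1,
		.cmd_size	= sizeof(struct work_struct),
	};
	struct request_queue *q;
	int err;

	err = blk_mq_alloc_tag_set(&set);
	if (err)
		return err;
	q = blk_mq_init_queue(&set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(&set);	/* queue never existed */
		return PTR_ERR(q);
	}
	/* teardown: blk_cleanup_queue(q); then blk_mq_free_tag_set(&set); */
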
3782/* 3797/*
@@ -4033,8 +4048,6 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4033 return NULL; 4048 return NULL;
4034 4049
4035 spin_lock_init(&rbd_dev->lock); 4050 spin_lock_init(&rbd_dev->lock);
4036 INIT_LIST_HEAD(&rbd_dev->rq_queue);
4037 INIT_WORK(&rbd_dev->rq_work, rbd_request_workfn);
4038 rbd_dev->flags = 0; 4051 rbd_dev->flags = 0;
4039 atomic_set(&rbd_dev->parent_ref, 0); 4052 atomic_set(&rbd_dev->parent_ref, 0);
4040 INIT_LIST_HEAD(&rbd_dev->node); 4053 INIT_LIST_HEAD(&rbd_dev->node);
@@ -4274,32 +4287,22 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4274 } 4287 }
4275 4288
4276 /* 4289 /*
4277 * We always update the parent overlap. If it's zero we 4290 * We always update the parent overlap. If it's zero we issue
4278 * treat it specially. 4291 * a warning, as we will proceed as if there was no parent.
4279 */ 4292 */
4280 rbd_dev->parent_overlap = overlap;
4281 if (!overlap) { 4293 if (!overlap) {
4282
4283 /* A null parent_spec indicates it's the initial probe */
4284
4285 if (parent_spec) { 4294 if (parent_spec) {
4286 /* 4295 /* refresh, careful to warn just once */
4287 * The overlap has become zero, so the clone 4296 if (rbd_dev->parent_overlap)
4288 * must have been resized down to 0 at some 4297 rbd_warn(rbd_dev,
4289 * point. Treat this the same as a flatten. 4298 "clone now standalone (overlap became 0)");
4290 */
4291 rbd_dev_parent_put(rbd_dev);
4292 pr_info("%s: clone image now standalone\n",
4293 rbd_dev->disk->disk_name);
4294 } else { 4299 } else {
4295 /* 4300 /* initial probe */
4296 * For the initial probe, if we find the 4301 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
4297 * overlap is zero we just pretend there was
4298 * no parent image.
4299 */
4300 rbd_warn(rbd_dev, "ignoring parent with overlap 0");
4301 } 4302 }
4302 } 4303 }
4304 rbd_dev->parent_overlap = overlap;
4305
4303out: 4306out:
4304 ret = 0; 4307 ret = 0;
4305out_err: 4308out_err:
@@ -4771,36 +4774,6 @@ static inline size_t next_token(const char **buf)
4771} 4774}
4772 4775
4773/* 4776/*
4774 * Finds the next token in *buf, and if the provided token buffer is
4775 * big enough, copies the found token into it. The result, if
4776 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4777 * must be terminated with '\0' on entry.
4778 *
4779 * Returns the length of the token found (not including the '\0').
4780 * Return value will be 0 if no token is found, and it will be >=
4781 * token_size if the token would not fit.
4782 *
4783 * The *buf pointer will be updated to point beyond the end of the
4784 * found token. Note that this occurs even if the token buffer is
4785 * too small to hold it.
4786 */
4787static inline size_t copy_token(const char **buf,
4788 char *token,
4789 size_t token_size)
4790{
4791 size_t len;
4792
4793 len = next_token(buf);
4794 if (len < token_size) {
4795 memcpy(token, *buf, len);
4796 *(token + len) = '\0';
4797 }
4798 *buf += len;
4799
4800 return len;
4801}
4802
4803/*
4804 * Finds the next token in *buf, dynamically allocates a buffer big 4777 * Finds the next token in *buf, dynamically allocates a buffer big
4805 * enough to hold a copy of it, and copies the token into the new 4778 * enough to hold a copy of it, and copies the token into the new
4806 * buffer. The copy is guaranteed to be terminated with '\0'. Note 4779 * buffer. The copy is guaranteed to be terminated with '\0'. Note
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index cdfbd21e3597..655e570b9b31 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -28,8 +28,7 @@ struct virtio_blk_vq {
28 char name[VQ_NAME_LEN]; 28 char name[VQ_NAME_LEN];
29} ____cacheline_aligned_in_smp; 29} ____cacheline_aligned_in_smp;
30 30
31struct virtio_blk 31struct virtio_blk {
32{
33 struct virtio_device *vdev; 32 struct virtio_device *vdev;
34 33
35 /* The disk structure for the kernel. */ 34 /* The disk structure for the kernel. */
@@ -52,8 +51,7 @@ struct virtio_blk
52 struct virtio_blk_vq *vqs; 51 struct virtio_blk_vq *vqs;
53}; 52};
54 53
55struct virtblk_req 54struct virtblk_req {
56{
57 struct request *req; 55 struct request *req;
58 struct virtio_blk_outhdr out_hdr; 56 struct virtio_blk_outhdr out_hdr;
59 struct virtio_scsi_inhdr in_hdr; 57 struct virtio_scsi_inhdr in_hdr;
@@ -575,6 +573,12 @@ static int virtblk_probe(struct virtio_device *vdev)
575 u16 min_io_size; 573 u16 min_io_size;
576 u8 physical_block_exp, alignment_offset; 574 u8 physical_block_exp, alignment_offset;
577 575
576 if (!vdev->config->get) {
577 dev_err(&vdev->dev, "%s failure: config access disabled\n",
578 __func__);
579 return -EINVAL;
580 }
581
578 err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS), 582 err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
579 GFP_KERNEL); 583 GFP_KERNEL);
580 if (err < 0) 584 if (err < 0)
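The check added above guards against a transport that offers no config-space accessors; without it, the virtio_cread() calls later in virtblk_probe() would dereference a NULL ->get. A sketch of the pattern, with illustrative names:

	static int sketch_probe(struct virtio_device *vdev)
	{
		u64 capacity;

		if (!vdev->config->get) {
			dev_err(&vdev->dev, "config access disabled\n");
			return -EINVAL;
		}
		/* safe only after the check above */
		virtio_cread(vdev, struct virtio_blk_config, capacity,
			     &capacity);
		return 0;
	}
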
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 8e233edd7a09..871bd3550cb0 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -528,7 +528,7 @@ out_cleanup:
528static inline void update_used_max(struct zram *zram, 528static inline void update_used_max(struct zram *zram,
529 const unsigned long pages) 529 const unsigned long pages)
530{ 530{
531 int old_max, cur_max; 531 unsigned long old_max, cur_max;
532 532
533 old_max = atomic_long_read(&zram->stats.max_used_pages); 533 old_max = atomic_long_read(&zram->stats.max_used_pages);
534 534
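The type change matters because atomic_long_read() returns a long and page counts are unsigned: with int locals, a stored count above INT_MAX reads back negative and the max comparison goes the wrong way. The surrounding function follows the usual cmpxchg max-update loop; a self-contained sketch of that loop with the corrected types (illustrative, not the patch itself):

	static inline void sketch_update_max(atomic_long_t *max,
					     unsigned long pages)
	{
		unsigned long old_max, cur_max;

		old_max = atomic_long_read(max);
		do {
			cur_max = old_max;
			if (pages > cur_max)
				old_max = atomic_long_cmpxchg(max, cur_max,
							      pages);
		} while (old_max != cur_max);	/* retry if we raced */
	}
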
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 3ca2e1bf7bfa..8c1bf6190533 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -273,6 +273,7 @@ static const struct usb_device_id blacklist_table[] = {
273 { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL }, 273 { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
274 274
275 /* Intel Bluetooth devices */ 275 /* Intel Bluetooth devices */
276 { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
276 { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL }, 277 { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
277 { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL }, 278 { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
278 { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW }, 279 { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index ec318bf434a6..1786574536b2 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -157,12 +157,16 @@ static int ipmi_release(struct inode *inode, struct file *file)
157{ 157{
158 struct ipmi_file_private *priv = file->private_data; 158 struct ipmi_file_private *priv = file->private_data;
159 int rv; 159 int rv;
160 struct ipmi_recv_msg *msg, *next;
160 161
161 rv = ipmi_destroy_user(priv->user); 162 rv = ipmi_destroy_user(priv->user);
162 if (rv) 163 if (rv)
163 return rv; 164 return rv;
164 165
165 /* FIXME - free the messages in the list. */ 166 list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
167 ipmi_free_recv_msg(msg);
168
169
166 kfree(priv); 170 kfree(priv);
167 171
168 return 0; 172 return 0;
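This resolves the old FIXME by draining the receive queue on release. The _safe iterator is required because the loop body frees the current entry; it caches the next node before the body runs. A generic sketch of the idiom:

	struct sketch_item {
		struct list_head link;
	};

	static void sketch_drain(struct list_head *head)
	{
		struct sketch_item *cur, *tmp;

		list_for_each_entry_safe(cur, tmp, head, link) {
			list_del(&cur->link);
			kfree(cur);	/* safe: tmp already points past cur */
		}
	}
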
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 6b65fa4e0c55..9bb592872532 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -1483,14 +1483,10 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1483 smi_msg->msgid = msgid; 1483 smi_msg->msgid = msgid;
1484} 1484}
1485 1485
1486static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers, 1486static struct ipmi_smi_msg *smi_add_send_msg(ipmi_smi_t intf,
1487 struct ipmi_smi_msg *smi_msg, int priority) 1487 struct ipmi_smi_msg *smi_msg,
1488 int priority)
1488{ 1489{
1489 int run_to_completion = intf->run_to_completion;
1490 unsigned long flags;
1491
1492 if (!run_to_completion)
1493 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1494 if (intf->curr_msg) { 1490 if (intf->curr_msg) {
1495 if (priority > 0) 1491 if (priority > 0)
1496 list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs); 1492 list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
@@ -1500,8 +1496,25 @@ static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
1500 } else { 1496 } else {
1501 intf->curr_msg = smi_msg; 1497 intf->curr_msg = smi_msg;
1502 } 1498 }
1503 if (!run_to_completion) 1499
1500 return smi_msg;
1501}
1502
1503
1504static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
1505 struct ipmi_smi_msg *smi_msg, int priority)
1506{
1507 int run_to_completion = intf->run_to_completion;
1508
1509 if (run_to_completion) {
1510 smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1511 } else {
1512 unsigned long flags;
1513
1514 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1515 smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1504 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 1516 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
1517 }
1505 1518
1506 if (smi_msg) 1519 if (smi_msg)
1507 handlers->sender(intf->send_info, smi_msg); 1520 handlers->sender(intf->send_info, smi_msg);
@@ -1985,7 +1998,9 @@ static int smi_ipmb_proc_show(struct seq_file *m, void *v)
1985 seq_printf(m, "%x", intf->channels[0].address); 1998 seq_printf(m, "%x", intf->channels[0].address);
1986 for (i = 1; i < IPMI_MAX_CHANNELS; i++) 1999 for (i = 1; i < IPMI_MAX_CHANNELS; i++)
1987 seq_printf(m, " %x", intf->channels[i].address); 2000 seq_printf(m, " %x", intf->channels[i].address);
1988 return seq_putc(m, '\n'); 2001 seq_putc(m, '\n');
2002
2003 return seq_has_overflowed(m);
1989} 2004}
1990 2005
1991static int smi_ipmb_proc_open(struct inode *inode, struct file *file) 2006static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
@@ -2004,9 +2019,11 @@ static int smi_version_proc_show(struct seq_file *m, void *v)
2004{ 2019{
2005 ipmi_smi_t intf = m->private; 2020 ipmi_smi_t intf = m->private;
2006 2021
2007 return seq_printf(m, "%u.%u\n", 2022 seq_printf(m, "%u.%u\n",
2008 ipmi_version_major(&intf->bmc->id), 2023 ipmi_version_major(&intf->bmc->id),
2009 ipmi_version_minor(&intf->bmc->id)); 2024 ipmi_version_minor(&intf->bmc->id));
2025
2026 return seq_has_overflowed(m);
2010} 2027}
2011 2028
2012static int smi_version_proc_open(struct inode *inode, struct file *file) 2029static int smi_version_proc_open(struct inode *inode, struct file *file)
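These hunks (and the matching ones in ipmi_si_intf.c and ipmi_ssif.c below) track the seq_file API change in which seq_printf()/seq_puts() stopped returning a value; ->show() callbacks now report truncation through seq_has_overflowed() instead. The resulting pattern, sketched with illustrative values:

	static int sketch_proc_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "%u.%u\n", 1, 0);

		/* transitional idiom: hand the overflow state back to
		 * seq_file rather than the old seq_printf() return code */
		return seq_has_overflowed(m);
	}
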
@@ -2353,11 +2370,28 @@ static struct attribute *bmc_dev_attrs[] = {
2353 &dev_attr_additional_device_support.attr, 2370 &dev_attr_additional_device_support.attr,
2354 &dev_attr_manufacturer_id.attr, 2371 &dev_attr_manufacturer_id.attr,
2355 &dev_attr_product_id.attr, 2372 &dev_attr_product_id.attr,
2373 &dev_attr_aux_firmware_revision.attr,
2374 &dev_attr_guid.attr,
2356 NULL 2375 NULL
2357}; 2376};
2358 2377
2378static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
2379 struct attribute *attr, int idx)
2380{
2381 struct device *dev = kobj_to_dev(kobj);
2382 struct bmc_device *bmc = to_bmc_device(dev);
2383 umode_t mode = attr->mode;
2384
2385 if (attr == &dev_attr_aux_firmware_revision.attr)
2386 return bmc->id.aux_firmware_revision_set ? mode : 0;
2387 if (attr == &dev_attr_guid.attr)
2388 return bmc->guid_set ? mode : 0;
2389 return mode;
2390}
2391
2359static struct attribute_group bmc_dev_attr_group = { 2392static struct attribute_group bmc_dev_attr_group = {
2360 .attrs = bmc_dev_attrs, 2393 .attrs = bmc_dev_attrs,
2394 .is_visible = bmc_dev_attr_is_visible,
2361}; 2395};
2362 2396
2363static const struct attribute_group *bmc_dev_attr_groups[] = { 2397static const struct attribute_group *bmc_dev_attr_groups[] = {
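Moving the conditional attributes into the group with an .is_visible hook replaces the manual device_create_file()/device_remove_file() calls deleted below, and avoids creating sysfs files after the device is already registered. The contract, sketched: return 0 to hide an attribute, or its mode to expose it (sketch_* names and the gate flag are hypothetical):

	static struct attribute *sketch_attrs[] = {
		/* ... conditional attributes ... */
		NULL
	};

	static bool sketch_feature_present;	/* hypothetical gate */

	static umode_t sketch_is_visible(struct kobject *kobj,
					 struct attribute *attr, int idx)
	{
		return sketch_feature_present ? attr->mode : 0;
	}

	static struct attribute_group sketch_group = {
		.attrs		= sketch_attrs,
		.is_visible	= sketch_is_visible,
	};
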
@@ -2380,13 +2414,6 @@ cleanup_bmc_device(struct kref *ref)
2380{ 2414{
2381 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount); 2415 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
2382 2416
2383 if (bmc->id.aux_firmware_revision_set)
2384 device_remove_file(&bmc->pdev.dev,
2385 &dev_attr_aux_firmware_revision);
2386 if (bmc->guid_set)
2387 device_remove_file(&bmc->pdev.dev,
2388 &dev_attr_guid);
2389
2390 platform_device_unregister(&bmc->pdev); 2417 platform_device_unregister(&bmc->pdev);
2391} 2418}
2392 2419
@@ -2407,33 +2434,6 @@ static void ipmi_bmc_unregister(ipmi_smi_t intf)
2407 mutex_unlock(&ipmidriver_mutex); 2434 mutex_unlock(&ipmidriver_mutex);
2408} 2435}
2409 2436
2410static int create_bmc_files(struct bmc_device *bmc)
2411{
2412 int err;
2413
2414 if (bmc->id.aux_firmware_revision_set) {
2415 err = device_create_file(&bmc->pdev.dev,
2416 &dev_attr_aux_firmware_revision);
2417 if (err)
2418 goto out;
2419 }
2420 if (bmc->guid_set) {
2421 err = device_create_file(&bmc->pdev.dev,
2422 &dev_attr_guid);
2423 if (err)
2424 goto out_aux_firm;
2425 }
2426
2427 return 0;
2428
2429out_aux_firm:
2430 if (bmc->id.aux_firmware_revision_set)
2431 device_remove_file(&bmc->pdev.dev,
2432 &dev_attr_aux_firmware_revision);
2433out:
2434 return err;
2435}
2436
2437static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum) 2437static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
2438{ 2438{
2439 int rv; 2439 int rv;
@@ -2522,15 +2522,6 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
2522 return rv; 2522 return rv;
2523 } 2523 }
2524 2524
2525 rv = create_bmc_files(bmc);
2526 if (rv) {
2527 mutex_lock(&ipmidriver_mutex);
2528 platform_device_unregister(&bmc->pdev);
2529 mutex_unlock(&ipmidriver_mutex);
2530
2531 return rv;
2532 }
2533
2534 dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, " 2525 dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
2535 "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 2526 "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2536 bmc->id.manufacturer_id, 2527 bmc->id.manufacturer_id,
@@ -4212,7 +4203,6 @@ static void need_waiter(ipmi_smi_t intf)
4212static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0); 4203static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
4213static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0); 4204static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
4214 4205
4215/* FIXME - convert these to slabs. */
4216static void free_smi_msg(struct ipmi_smi_msg *msg) 4206static void free_smi_msg(struct ipmi_smi_msg *msg)
4217{ 4207{
4218 atomic_dec(&smi_msg_inuse_count); 4208 atomic_dec(&smi_msg_inuse_count);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 967b73aa4e66..f6646ed3047e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -321,6 +321,18 @@ static int try_smi_init(struct smi_info *smi);
321static void cleanup_one_si(struct smi_info *to_clean); 321static void cleanup_one_si(struct smi_info *to_clean);
322static void cleanup_ipmi_si(void); 322static void cleanup_ipmi_si(void);
323 323
324#ifdef DEBUG_TIMING
325void debug_timestamp(char *msg)
326{
327 struct timespec64 t;
328
329 getnstimeofday64(&t);
330 pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec);
331}
332#else
333#define debug_timestamp(x)
334#endif
335
324static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); 336static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
325static int register_xaction_notifier(struct notifier_block *nb) 337static int register_xaction_notifier(struct notifier_block *nb)
326{ 338{
@@ -358,9 +370,6 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode)
358static enum si_sm_result start_next_msg(struct smi_info *smi_info) 370static enum si_sm_result start_next_msg(struct smi_info *smi_info)
359{ 371{
360 int rv; 372 int rv;
361#ifdef DEBUG_TIMING
362 struct timeval t;
363#endif
364 373
365 if (!smi_info->waiting_msg) { 374 if (!smi_info->waiting_msg) {
366 smi_info->curr_msg = NULL; 375 smi_info->curr_msg = NULL;
@@ -370,10 +379,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
370 379
371 smi_info->curr_msg = smi_info->waiting_msg; 380 smi_info->curr_msg = smi_info->waiting_msg;
372 smi_info->waiting_msg = NULL; 381 smi_info->waiting_msg = NULL;
373#ifdef DEBUG_TIMING 382 debug_timestamp("Start2");
374 do_gettimeofday(&t);
375 printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
376#endif
377 err = atomic_notifier_call_chain(&xaction_notifier_list, 383 err = atomic_notifier_call_chain(&xaction_notifier_list,
378 0, smi_info); 384 0, smi_info);
379 if (err & NOTIFY_STOP_MASK) { 385 if (err & NOTIFY_STOP_MASK) {
@@ -582,12 +588,8 @@ static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
582static void handle_transaction_done(struct smi_info *smi_info) 588static void handle_transaction_done(struct smi_info *smi_info)
583{ 589{
584 struct ipmi_smi_msg *msg; 590 struct ipmi_smi_msg *msg;
585#ifdef DEBUG_TIMING
586 struct timeval t;
587 591
588 do_gettimeofday(&t); 592 debug_timestamp("Done");
589 printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
590#endif
591 switch (smi_info->si_state) { 593 switch (smi_info->si_state) {
592 case SI_NORMAL: 594 case SI_NORMAL:
593 if (!smi_info->curr_msg) 595 if (!smi_info->curr_msg)
@@ -929,24 +931,15 @@ static void sender(void *send_info,
929 struct smi_info *smi_info = send_info; 931 struct smi_info *smi_info = send_info;
930 enum si_sm_result result; 932 enum si_sm_result result;
931 unsigned long flags; 933 unsigned long flags;
932#ifdef DEBUG_TIMING
933 struct timeval t;
934#endif
935
936 BUG_ON(smi_info->waiting_msg);
937 smi_info->waiting_msg = msg;
938 934
939#ifdef DEBUG_TIMING 935 debug_timestamp("Enqueue");
940 do_gettimeofday(&t);
941 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
942#endif
943 936
944 if (smi_info->run_to_completion) { 937 if (smi_info->run_to_completion) {
945 /* 938 /*
946 * If we are running to completion, start it and run 939 * If we are running to completion, start it and run
947 * transactions until everything is clear. 940 * transactions until everything is clear.
948 */ 941 */
949 smi_info->curr_msg = smi_info->waiting_msg; 942 smi_info->curr_msg = msg;
950 smi_info->waiting_msg = NULL; 943 smi_info->waiting_msg = NULL;
951 944
952 /* 945 /*
@@ -964,6 +957,15 @@ static void sender(void *send_info,
964 } 957 }
965 958
966 spin_lock_irqsave(&smi_info->si_lock, flags); 959 spin_lock_irqsave(&smi_info->si_lock, flags);
960 /*
961 * The following two lines don't need to be under the lock for
962 * the lock's sake, but they do need SMP memory barriers to
963 * avoid getting things out of order. We are already claiming
964 * the lock, anyway, so just do it under the lock to avoid the
965 * ordering problem.
966 */
967 BUG_ON(smi_info->waiting_msg);
968 smi_info->waiting_msg = msg;
967 check_start_timer_thread(smi_info); 969 check_start_timer_thread(smi_info);
968 spin_unlock_irqrestore(&smi_info->si_lock, flags); 970 spin_unlock_irqrestore(&smi_info->si_lock, flags);
969} 971}
@@ -989,18 +991,18 @@ static void set_run_to_completion(void *send_info, bool i_run_to_completion)
989 * we are spinning in kipmid looking for something and not delaying 991 * we are spinning in kipmid looking for something and not delaying
990 * between checks 992 * between checks
991 */ 993 */
992static inline void ipmi_si_set_not_busy(struct timespec *ts) 994static inline void ipmi_si_set_not_busy(struct timespec64 *ts)
993{ 995{
994 ts->tv_nsec = -1; 996 ts->tv_nsec = -1;
995} 997}
996static inline int ipmi_si_is_busy(struct timespec *ts) 998static inline int ipmi_si_is_busy(struct timespec64 *ts)
997{ 999{
998 return ts->tv_nsec != -1; 1000 return ts->tv_nsec != -1;
999} 1001}
1000 1002
1001static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result, 1003static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
1002 const struct smi_info *smi_info, 1004 const struct smi_info *smi_info,
1003 struct timespec *busy_until) 1005 struct timespec64 *busy_until)
1004{ 1006{
1005 unsigned int max_busy_us = 0; 1007 unsigned int max_busy_us = 0;
1006 1008
@@ -1009,12 +1011,13 @@ static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
1009 if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY) 1011 if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
1010 ipmi_si_set_not_busy(busy_until); 1012 ipmi_si_set_not_busy(busy_until);
1011 else if (!ipmi_si_is_busy(busy_until)) { 1013 else if (!ipmi_si_is_busy(busy_until)) {
1012 getnstimeofday(busy_until); 1014 getnstimeofday64(busy_until);
1013 timespec_add_ns(busy_until, max_busy_us*NSEC_PER_USEC); 1015 timespec64_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
1014 } else { 1016 } else {
1015 struct timespec now; 1017 struct timespec64 now;
1016 getnstimeofday(&now); 1018
1017 if (unlikely(timespec_compare(&now, busy_until) > 0)) { 1019 getnstimeofday64(&now);
1020 if (unlikely(timespec64_compare(&now, busy_until) > 0)) {
1018 ipmi_si_set_not_busy(busy_until); 1021 ipmi_si_set_not_busy(busy_until);
1019 return 0; 1022 return 0;
1020 } 1023 }
@@ -1037,7 +1040,7 @@ static int ipmi_thread(void *data)
1037 struct smi_info *smi_info = data; 1040 struct smi_info *smi_info = data;
1038 unsigned long flags; 1041 unsigned long flags;
1039 enum si_sm_result smi_result; 1042 enum si_sm_result smi_result;
1040 struct timespec busy_until; 1043 struct timespec64 busy_until;
1041 1044
1042 ipmi_si_set_not_busy(&busy_until); 1045 ipmi_si_set_not_busy(&busy_until);
1043 set_user_nice(current, MAX_NICE); 1046 set_user_nice(current, MAX_NICE);
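The busy-wait bookkeeping is converted from struct timespec to struct timespec64, making the deadline arithmetic y2038-safe on 32-bit. A compact sketch of the logic with the 64-bit helpers (sketch_* names are illustrative):

	static bool sketch_busy_expired(struct timespec64 *busy_until,
					unsigned int max_busy_us)
	{
		struct timespec64 now;

		if (busy_until->tv_nsec == -1) {
			/* first delay: arm the deadline */
			getnstimeofday64(busy_until);
			timespec64_add_ns(busy_until,
					  max_busy_us * NSEC_PER_USEC);
			return false;
		}
		getnstimeofday64(&now);
		return timespec64_compare(&now, busy_until) > 0;
	}
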
@@ -1128,15 +1131,10 @@ static void smi_timeout(unsigned long data)
1128 unsigned long jiffies_now; 1131 unsigned long jiffies_now;
1129 long time_diff; 1132 long time_diff;
1130 long timeout; 1133 long timeout;
1131#ifdef DEBUG_TIMING
1132 struct timeval t;
1133#endif
1134 1134
1135 spin_lock_irqsave(&(smi_info->si_lock), flags); 1135 spin_lock_irqsave(&(smi_info->si_lock), flags);
1136#ifdef DEBUG_TIMING 1136 debug_timestamp("Timer");
1137 do_gettimeofday(&t); 1137
1138 printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1139#endif
1140 jiffies_now = jiffies; 1138 jiffies_now = jiffies;
1141 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) 1139 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
1142 * SI_USEC_PER_JIFFY); 1140 * SI_USEC_PER_JIFFY);
@@ -1173,18 +1171,13 @@ static irqreturn_t si_irq_handler(int irq, void *data)
1173{ 1171{
1174 struct smi_info *smi_info = data; 1172 struct smi_info *smi_info = data;
1175 unsigned long flags; 1173 unsigned long flags;
1176#ifdef DEBUG_TIMING
1177 struct timeval t;
1178#endif
1179 1174
1180 spin_lock_irqsave(&(smi_info->si_lock), flags); 1175 spin_lock_irqsave(&(smi_info->si_lock), flags);
1181 1176
1182 smi_inc_stat(smi_info, interrupts); 1177 smi_inc_stat(smi_info, interrupts);
1183 1178
1184#ifdef DEBUG_TIMING 1179 debug_timestamp("Interrupt");
1185 do_gettimeofday(&t); 1180
1186 printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1187#endif
1188 smi_event_handler(smi_info, 0); 1181 smi_event_handler(smi_info, 0);
1189 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 1182 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1190 return IRQ_HANDLED; 1183 return IRQ_HANDLED;
@@ -2038,18 +2031,13 @@ static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
2038{ 2031{
2039 struct smi_info *smi_info = context; 2032 struct smi_info *smi_info = context;
2040 unsigned long flags; 2033 unsigned long flags;
2041#ifdef DEBUG_TIMING
2042 struct timeval t;
2043#endif
2044 2034
2045 spin_lock_irqsave(&(smi_info->si_lock), flags); 2035 spin_lock_irqsave(&(smi_info->si_lock), flags);
2046 2036
2047 smi_inc_stat(smi_info, interrupts); 2037 smi_inc_stat(smi_info, interrupts);
2048 2038
2049#ifdef DEBUG_TIMING 2039 debug_timestamp("ACPI_GPE");
2050 do_gettimeofday(&t); 2040
2051 printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
2052#endif
2053 smi_event_handler(smi_info, 0); 2041 smi_event_handler(smi_info, 0);
2054 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 2042 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
2055 2043
@@ -2071,7 +2059,6 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
2071 if (!info->irq) 2059 if (!info->irq)
2072 return 0; 2060 return 0;
2073 2061
2074 /* FIXME - is level triggered right? */
2075 status = acpi_install_gpe_handler(NULL, 2062 status = acpi_install_gpe_handler(NULL,
2076 info->irq, 2063 info->irq,
2077 ACPI_GPE_LEVEL_TRIGGERED, 2064 ACPI_GPE_LEVEL_TRIGGERED,
@@ -2998,7 +2985,9 @@ static int smi_type_proc_show(struct seq_file *m, void *v)
2998{ 2985{
2999 struct smi_info *smi = m->private; 2986 struct smi_info *smi = m->private;
3000 2987
3001 return seq_printf(m, "%s\n", si_to_str[smi->si_type]); 2988 seq_printf(m, "%s\n", si_to_str[smi->si_type]);
2989
2990 return seq_has_overflowed(m);
3002} 2991}
3003 2992
3004static int smi_type_proc_open(struct inode *inode, struct file *file) 2993static int smi_type_proc_open(struct inode *inode, struct file *file)
@@ -3060,16 +3049,18 @@ static int smi_params_proc_show(struct seq_file *m, void *v)
3060{ 3049{
3061 struct smi_info *smi = m->private; 3050 struct smi_info *smi = m->private;
3062 3051
3063 return seq_printf(m, 3052 seq_printf(m,
3064 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", 3053 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
3065 si_to_str[smi->si_type], 3054 si_to_str[smi->si_type],
3066 addr_space_to_str[smi->io.addr_type], 3055 addr_space_to_str[smi->io.addr_type],
3067 smi->io.addr_data, 3056 smi->io.addr_data,
3068 smi->io.regspacing, 3057 smi->io.regspacing,
3069 smi->io.regsize, 3058 smi->io.regsize,
3070 smi->io.regshift, 3059 smi->io.regshift,
3071 smi->irq, 3060 smi->irq,
3072 smi->slave_addr); 3061 smi->slave_addr);
3062
3063 return seq_has_overflowed(m);
3073} 3064}
3074 3065
3075static int smi_params_proc_open(struct inode *inode, struct file *file) 3066static int smi_params_proc_open(struct inode *inode, struct file *file)
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 982b96323f82..f6e378dac5f5 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1097,8 +1097,6 @@ static int ssif_remove(struct i2c_client *client)
1097 if (!ssif_info) 1097 if (!ssif_info)
1098 return 0; 1098 return 0;
1099 1099
1100 i2c_set_clientdata(client, NULL);
1101
1102 /* 1100 /*
1103 * After this point, we won't deliver anything asynchronously 1101 * After this point, we won't deliver anything asynchronously
1104 * to the message handler. We can unregister ourselves. 1102 * to the message handler. We can unregister ourselves.
@@ -1198,7 +1196,9 @@ static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info)
1198 1196
1199static int smi_type_proc_show(struct seq_file *m, void *v) 1197static int smi_type_proc_show(struct seq_file *m, void *v)
1200{ 1198{
1201 return seq_puts(m, "ssif\n"); 1199 seq_puts(m, "ssif\n");
1200
1201 return seq_has_overflowed(m);
1202} 1202}
1203 1203
1204static int smi_type_proc_open(struct inode *inode, struct file *file) 1204static int smi_type_proc_open(struct inode *inode, struct file *file)
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 26afb56a8073..fae2dbbf5745 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1986,7 +1986,10 @@ static int virtcons_probe(struct virtio_device *vdev)
1986 bool multiport; 1986 bool multiport;
1987 bool early = early_put_chars != NULL; 1987 bool early = early_put_chars != NULL;
1988 1988
1989 if (!vdev->config->get) { 1989 /* We only need a config space if features are offered */
1990 if (!vdev->config->get &&
1991 (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)
1992 || virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT))) {
1990 dev_err(&vdev->dev, "%s failure: config access disabled\n", 1993 dev_err(&vdev->dev, "%s failure: config access disabled\n",
1991 __func__); 1994 __func__);
1992 return -EINVAL; 1995 return -EINVAL;
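Unlike the unconditional virtio_blk check earlier in this patch, the console only needs config space when VIRTIO_CONSOLE_F_SIZE or VIRTIO_CONSOLE_F_MULTIPORT was negotiated, so the guard is feature-gated. The shape of the test, condensed:

	/* fail probe only if a negotiated feature requires config reads */
	if (!vdev->config->get &&
	    (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE) ||
	     virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)))
		return -EINVAL;
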
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 91f86131bb7a..0b474a04730f 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -102,12 +102,12 @@ config COMMON_CLK_AXI_CLKGEN
102 Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx 102 Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx
103 FPGAs. It is commonly used in Analog Devices' reference designs. 103 FPGAs. It is commonly used in Analog Devices' reference designs.
104 104
105config CLK_PPC_CORENET 105config CLK_QORIQ
106 bool "Clock driver for PowerPC corenet platforms" 106 bool "Clock driver for Freescale QorIQ platforms"
107 depends on PPC_E500MC && OF 107 depends on (PPC_E500MC || ARM) && OF
108 ---help--- 108 ---help---
109 This adds the clock driver support for Freescale PowerPC corenet 109 This adds the clock driver support for Freescale QorIQ platforms
110 platforms using common clock framework. 110 using common clock framework.
111 111
112config COMMON_CLK_XGENE 112config COMMON_CLK_XGENE
113 bool "Clock driver for APM XGene SoC" 113 bool "Clock driver for APM XGene SoC"
@@ -135,6 +135,14 @@ config COMMON_CLK_PXA
135 ---help--- 135 ---help---
136 Support for the Marvell PXA SoC. 136 Support for the Marvell PXA SoC.
137 137
138config COMMON_CLK_CDCE706
139 tristate "Clock driver for TI CDCE706 clock synthesizer"
140 depends on I2C
141 select REGMAP_I2C
142 select RATIONAL
143 ---help---
144 This driver supports TI CDCE706 programmable 3-PLL clock synthesizer.
145
138source "drivers/clk/qcom/Kconfig" 146source "drivers/clk/qcom/Kconfig"
139 147
140endmenu 148endmenu
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index d5fba5bc6e1b..d478ceb69c5f 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -16,9 +16,11 @@ endif
16 16
17# hardware specific clock types 17# hardware specific clock types
18# please keep this section sorted lexicographically by file/directory path name 18# please keep this section sorted lexicographically by file/directory path name
19obj-$(CONFIG_MACH_ASM9260) += clk-asm9260.o
19obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o 20obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o
20obj-$(CONFIG_ARCH_AXXIA) += clk-axm5516.o 21obj-$(CONFIG_ARCH_AXXIA) += clk-axm5516.o
21obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o 22obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o
23obj-$(CONFIG_COMMON_CLK_CDCE706) += clk-cdce706.o
22obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o 24obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
23obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o 25obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
24obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o 26obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
@@ -30,7 +32,7 @@ obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
30obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o 32obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
31obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o 33obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
32obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o 34obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o
33obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o 35obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o
34obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o 36obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o
35obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o 37obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o
36obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o 38obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index bbdb1b985c91..86c8a073dcc3 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -56,6 +56,8 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
56 56
57static long clk_programmable_determine_rate(struct clk_hw *hw, 57static long clk_programmable_determine_rate(struct clk_hw *hw,
58 unsigned long rate, 58 unsigned long rate,
59 unsigned long min_rate,
60 unsigned long max_rate,
59 unsigned long *best_parent_rate, 61 unsigned long *best_parent_rate,
60 struct clk_hw **best_parent_hw) 62 struct clk_hw **best_parent_hw)
61{ 63{
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index 1c06f6f3a8c5..05abae89262e 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -1032,6 +1032,8 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
1032} 1032}
1033 1033
1034static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate, 1034static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
1035 unsigned long min_rate,
1036 unsigned long max_rate,
1035 unsigned long *best_parent_rate, struct clk_hw **best_parent) 1037 unsigned long *best_parent_rate, struct clk_hw **best_parent)
1036{ 1038{
1037 struct kona_clk *bcm_clk = to_kona_clk(hw); 1039 struct kona_clk *bcm_clk = to_kona_clk(hw);
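These two hunks (and the matching one in at91 above) adapt to the clk framework's new ->determine_rate() prototype, which passes the consumer-imposed rate range down so the callback can pick a parent and rate inside it. A skeleton of the new signature, with the added parameters clamped up front (illustrative only):

	static long sketch_determine_rate(struct clk_hw *hw, unsigned long rate,
					  unsigned long min_rate,
					  unsigned long max_rate,
					  unsigned long *best_parent_rate,
					  struct clk_hw **best_parent_hw)
	{
		rate = clamp(rate, min_rate, max_rate);
		/* ... choose the best parent / parent rate as before ... */
		return rate;
	}
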
diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
new file mode 100644
index 000000000000..88f4ff6916fe
--- /dev/null
+++ b/drivers/clk/clk-asm9260.c
@@ -0,0 +1,348 @@
1/*
2 * Copyright (c) 2014 Oleksij Rempel <linux@rempel-privat.de>.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/clk.h>
18#include <linux/clkdev.h>
19#include <linux/err.h>
20#include <linux/io.h>
21#include <linux/clk-provider.h>
22#include <linux/spinlock.h>
23#include <linux/of.h>
24#include <linux/of_address.h>
25#include <dt-bindings/clock/alphascale,asm9260.h>
26
27#define HW_AHBCLKCTRL0 0x0020
28#define HW_AHBCLKCTRL1 0x0030
29#define HW_SYSPLLCTRL 0x0100
30#define HW_MAINCLKSEL 0x0120
31#define HW_MAINCLKUEN 0x0124
32#define HW_UARTCLKSEL 0x0128
33#define HW_UARTCLKUEN 0x012c
34#define HW_I2S0CLKSEL 0x0130
35#define HW_I2S0CLKUEN 0x0134
36#define HW_I2S1CLKSEL 0x0138
37#define HW_I2S1CLKUEN 0x013c
38#define HW_WDTCLKSEL 0x0160
39#define HW_WDTCLKUEN 0x0164
40#define HW_CLKOUTCLKSEL 0x0170
41#define HW_CLKOUTCLKUEN 0x0174
42#define HW_CPUCLKDIV 0x017c
43#define HW_SYSAHBCLKDIV 0x0180
44#define HW_I2S0MCLKDIV 0x0190
45#define HW_I2S0SCLKDIV 0x0194
46#define HW_I2S1MCLKDIV 0x0188
47#define HW_I2S1SCLKDIV 0x018c
48#define HW_UART0CLKDIV 0x0198
49#define HW_UART1CLKDIV 0x019c
50#define HW_UART2CLKDIV 0x01a0
51#define HW_UART3CLKDIV 0x01a4
52#define HW_UART4CLKDIV 0x01a8
53#define HW_UART5CLKDIV 0x01ac
54#define HW_UART6CLKDIV 0x01b0
55#define HW_UART7CLKDIV 0x01b4
56#define HW_UART8CLKDIV 0x01b8
57#define HW_UART9CLKDIV 0x01bc
58#define HW_SPI0CLKDIV 0x01c0
59#define HW_SPI1CLKDIV 0x01c4
60#define HW_QUADSPICLKDIV 0x01c8
61#define HW_SSP0CLKDIV 0x01d0
62#define HW_NANDCLKDIV 0x01d4
63#define HW_TRACECLKDIV 0x01e0
64#define HW_CAMMCLKDIV 0x01e8
65#define HW_WDTCLKDIV 0x01ec
66#define HW_CLKOUTCLKDIV 0x01f4
67#define HW_MACCLKDIV 0x01f8
68#define HW_LCDCLKDIV 0x01fc
69#define HW_ADCANACLKDIV 0x0200
70
71static struct clk *clks[MAX_CLKS];
72static struct clk_onecell_data clk_data;
73static DEFINE_SPINLOCK(asm9260_clk_lock);
74
75struct asm9260_div_clk {
76 unsigned int idx;
77 const char *name;
78 const char *parent_name;
79 u32 reg;
80};
81
82struct asm9260_gate_data {
83 unsigned int idx;
84 const char *name;
85 const char *parent_name;
86 u32 reg;
87 u8 bit_idx;
88 unsigned long flags;
89};
90
91struct asm9260_mux_clock {
92 u8 mask;
93 u32 *table;
94 const char *name;
95 const char **parent_names;
96 u8 num_parents;
97 unsigned long offset;
98 unsigned long flags;
99};
100
101static void __iomem *base;
102
103static const struct asm9260_div_clk asm9260_div_clks[] __initconst = {
104 { CLKID_SYS_CPU, "cpu_div", "main_gate", HW_CPUCLKDIV },
105 { CLKID_SYS_AHB, "ahb_div", "cpu_div", HW_SYSAHBCLKDIV },
106
107 /* i2s has two dividers: one for the external mclk only and an
108 * internal divider for all clocks. */
109 { CLKID_SYS_I2S0M, "i2s0m_div", "i2s0_mclk", HW_I2S0MCLKDIV },
110 { CLKID_SYS_I2S1M, "i2s1m_div", "i2s1_mclk", HW_I2S1MCLKDIV },
111 { CLKID_SYS_I2S0S, "i2s0s_div", "i2s0_gate", HW_I2S0SCLKDIV },
112 { CLKID_SYS_I2S1S, "i2s1s_div", "i2s0_gate", HW_I2S1SCLKDIV },
113
114 { CLKID_SYS_UART0, "uart0_div", "uart_gate", HW_UART0CLKDIV },
115 { CLKID_SYS_UART1, "uart1_div", "uart_gate", HW_UART1CLKDIV },
116 { CLKID_SYS_UART2, "uart2_div", "uart_gate", HW_UART2CLKDIV },
117 { CLKID_SYS_UART3, "uart3_div", "uart_gate", HW_UART3CLKDIV },
118 { CLKID_SYS_UART4, "uart4_div", "uart_gate", HW_UART4CLKDIV },
119 { CLKID_SYS_UART5, "uart5_div", "uart_gate", HW_UART5CLKDIV },
120 { CLKID_SYS_UART6, "uart6_div", "uart_gate", HW_UART6CLKDIV },
121 { CLKID_SYS_UART7, "uart7_div", "uart_gate", HW_UART7CLKDIV },
122 { CLKID_SYS_UART8, "uart8_div", "uart_gate", HW_UART8CLKDIV },
123 { CLKID_SYS_UART9, "uart9_div", "uart_gate", HW_UART9CLKDIV },
124
125 { CLKID_SYS_SPI0, "spi0_div", "main_gate", HW_SPI0CLKDIV },
126 { CLKID_SYS_SPI1, "spi1_div", "main_gate", HW_SPI1CLKDIV },
127 { CLKID_SYS_QUADSPI, "quadspi_div", "main_gate", HW_QUADSPICLKDIV },
128 { CLKID_SYS_SSP0, "ssp0_div", "main_gate", HW_SSP0CLKDIV },
129 { CLKID_SYS_NAND, "nand_div", "main_gate", HW_NANDCLKDIV },
130 { CLKID_SYS_TRACE, "trace_div", "main_gate", HW_TRACECLKDIV },
131 { CLKID_SYS_CAMM, "camm_div", "main_gate", HW_CAMMCLKDIV },
132 { CLKID_SYS_MAC, "mac_div", "main_gate", HW_MACCLKDIV },
133 { CLKID_SYS_LCD, "lcd_div", "main_gate", HW_LCDCLKDIV },
134 { CLKID_SYS_ADCANA, "adcana_div", "main_gate", HW_ADCANACLKDIV },
135
136 { CLKID_SYS_WDT, "wdt_div", "wdt_gate", HW_WDTCLKDIV },
137 { CLKID_SYS_CLKOUT, "clkout_div", "clkout_gate", HW_CLKOUTCLKDIV },
138};
139
140static const struct asm9260_gate_data asm9260_mux_gates[] __initconst = {
141 { 0, "main_gate", "main_mux", HW_MAINCLKUEN, 0 },
142 { 0, "uart_gate", "uart_mux", HW_UARTCLKUEN, 0 },
143 { 0, "i2s0_gate", "i2s0_mux", HW_I2S0CLKUEN, 0 },
144 { 0, "i2s1_gate", "i2s1_mux", HW_I2S1CLKUEN, 0 },
145 { 0, "wdt_gate", "wdt_mux", HW_WDTCLKUEN, 0 },
146 { 0, "clkout_gate", "clkout_mux", HW_CLKOUTCLKUEN, 0 },
147};
148static const struct asm9260_gate_data asm9260_ahb_gates[] __initconst = {
149 /* ahb gates */
150 { CLKID_AHB_ROM, "rom", "ahb_div",
151 HW_AHBCLKCTRL0, 1, CLK_IGNORE_UNUSED},
152 { CLKID_AHB_RAM, "ram", "ahb_div",
153 HW_AHBCLKCTRL0, 2, CLK_IGNORE_UNUSED},
154 { CLKID_AHB_GPIO, "gpio", "ahb_div",
155 HW_AHBCLKCTRL0, 4 },
156 { CLKID_AHB_MAC, "mac", "ahb_div",
157 HW_AHBCLKCTRL0, 5 },
158 { CLKID_AHB_EMI, "emi", "ahb_div",
159 HW_AHBCLKCTRL0, 6, CLK_IGNORE_UNUSED},
160 { CLKID_AHB_USB0, "usb0", "ahb_div",
161 HW_AHBCLKCTRL0, 7 },
162 { CLKID_AHB_USB1, "usb1", "ahb_div",
163 HW_AHBCLKCTRL0, 8 },
164 { CLKID_AHB_DMA0, "dma0", "ahb_div",
165 HW_AHBCLKCTRL0, 9 },
166 { CLKID_AHB_DMA1, "dma1", "ahb_div",
167 HW_AHBCLKCTRL0, 10 },
168 { CLKID_AHB_UART0, "uart0", "ahb_div",
169 HW_AHBCLKCTRL0, 11 },
170 { CLKID_AHB_UART1, "uart1", "ahb_div",
171 HW_AHBCLKCTRL0, 12 },
172 { CLKID_AHB_UART2, "uart2", "ahb_div",
173 HW_AHBCLKCTRL0, 13 },
174 { CLKID_AHB_UART3, "uart3", "ahb_div",
175 HW_AHBCLKCTRL0, 14 },
176 { CLKID_AHB_UART4, "uart4", "ahb_div",
177 HW_AHBCLKCTRL0, 15 },
178 { CLKID_AHB_UART5, "uart5", "ahb_div",
179 HW_AHBCLKCTRL0, 16 },
180 { CLKID_AHB_UART6, "uart6", "ahb_div",
181 HW_AHBCLKCTRL0, 17 },
182 { CLKID_AHB_UART7, "uart7", "ahb_div",
183 HW_AHBCLKCTRL0, 18 },
184 { CLKID_AHB_UART8, "uart8", "ahb_div",
185 HW_AHBCLKCTRL0, 19 },
186 { CLKID_AHB_UART9, "uart9", "ahb_div",
187 HW_AHBCLKCTRL0, 20 },
188 { CLKID_AHB_I2S0, "i2s0", "ahb_div",
189 HW_AHBCLKCTRL0, 21 },
190 { CLKID_AHB_I2C0, "i2c0", "ahb_div",
191 HW_AHBCLKCTRL0, 22 },
192 { CLKID_AHB_I2C1, "i2c1", "ahb_div",
193 HW_AHBCLKCTRL0, 23 },
194 { CLKID_AHB_SSP0, "ssp0", "ahb_div",
195 HW_AHBCLKCTRL0, 24 },
196 { CLKID_AHB_IOCONFIG, "ioconf", "ahb_div",
197 HW_AHBCLKCTRL0, 25 },
198 { CLKID_AHB_WDT, "wdt", "ahb_div",
199 HW_AHBCLKCTRL0, 26 },
200 { CLKID_AHB_CAN0, "can0", "ahb_div",
201 HW_AHBCLKCTRL0, 27 },
202 { CLKID_AHB_CAN1, "can1", "ahb_div",
203 HW_AHBCLKCTRL0, 28 },
204 { CLKID_AHB_MPWM, "mpwm", "ahb_div",
205 HW_AHBCLKCTRL0, 29 },
206 { CLKID_AHB_SPI0, "spi0", "ahb_div",
207 HW_AHBCLKCTRL0, 30 },
208 { CLKID_AHB_SPI1, "spi1", "ahb_div",
209 HW_AHBCLKCTRL0, 31 },
210
211 { CLKID_AHB_QEI, "qei", "ahb_div",
212 HW_AHBCLKCTRL1, 0 },
213 { CLKID_AHB_QUADSPI0, "quadspi0", "ahb_div",
214 HW_AHBCLKCTRL1, 1 },
215 { CLKID_AHB_CAMIF, "capmif", "ahb_div",
216 HW_AHBCLKCTRL1, 2 },
217 { CLKID_AHB_LCDIF, "lcdif", "ahb_div",
218 HW_AHBCLKCTRL1, 3 },
219 { CLKID_AHB_TIMER0, "timer0", "ahb_div",
220 HW_AHBCLKCTRL1, 4 },
221 { CLKID_AHB_TIMER1, "timer1", "ahb_div",
222 HW_AHBCLKCTRL1, 5 },
223 { CLKID_AHB_TIMER2, "timer2", "ahb_div",
224 HW_AHBCLKCTRL1, 6 },
225 { CLKID_AHB_TIMER3, "timer3", "ahb_div",
226 HW_AHBCLKCTRL1, 7 },
227 { CLKID_AHB_IRQ, "irq", "ahb_div",
228 HW_AHBCLKCTRL1, 8, CLK_IGNORE_UNUSED},
229 { CLKID_AHB_RTC, "rtc", "ahb_div",
230 HW_AHBCLKCTRL1, 9 },
231 { CLKID_AHB_NAND, "nand", "ahb_div",
232 HW_AHBCLKCTRL1, 10 },
233 { CLKID_AHB_ADC0, "adc0", "ahb_div",
234 HW_AHBCLKCTRL1, 11 },
235 { CLKID_AHB_LED, "led", "ahb_div",
236 HW_AHBCLKCTRL1, 12 },
237 { CLKID_AHB_DAC0, "dac0", "ahb_div",
238 HW_AHBCLKCTRL1, 13 },
239 { CLKID_AHB_LCD, "lcd", "ahb_div",
240 HW_AHBCLKCTRL1, 14 },
241 { CLKID_AHB_I2S1, "i2s1", "ahb_div",
242 HW_AHBCLKCTRL1, 15 },
243 { CLKID_AHB_MAC1, "mac1", "ahb_div",
244 HW_AHBCLKCTRL1, 16 },
245};
246
247static const char __initdata *main_mux_p[] = { NULL, NULL };
248static const char __initdata *i2s0_mux_p[] = { NULL, NULL, "i2s0m_div"};
249static const char __initdata *i2s1_mux_p[] = { NULL, NULL, "i2s1m_div"};
250static const char __initdata *clkout_mux_p[] = { NULL, NULL, "rtc"};
251static u32 three_mux_table[] = {0, 1, 3};
252
253static struct asm9260_mux_clock asm9260_mux_clks[] __initdata = {
254 { 1, three_mux_table, "main_mux", main_mux_p,
255 ARRAY_SIZE(main_mux_p), HW_MAINCLKSEL, },
256 { 1, three_mux_table, "uart_mux", main_mux_p,
257 ARRAY_SIZE(main_mux_p), HW_UARTCLKSEL, },
258 { 1, three_mux_table, "wdt_mux", main_mux_p,
259 ARRAY_SIZE(main_mux_p), HW_WDTCLKSEL, },
260 { 3, three_mux_table, "i2s0_mux", i2s0_mux_p,
261 ARRAY_SIZE(i2s0_mux_p), HW_I2S0CLKSEL, },
262 { 3, three_mux_table, "i2s1_mux", i2s1_mux_p,
263 ARRAY_SIZE(i2s1_mux_p), HW_I2S1CLKSEL, },
264 { 3, three_mux_table, "clkout_mux", clkout_mux_p,
265 ARRAY_SIZE(clkout_mux_p), HW_CLKOUTCLKSEL, },
266};
267
268static void __init asm9260_acc_init(struct device_node *np)
269{
270 struct clk *clk;
271 const char *ref_clk, *pll_clk = "pll";
272 u32 rate;
273 int n;
274 u32 accuracy = 0;
275
276 base = of_io_request_and_map(np, 0, np->name);
277 if (!base)
278 panic("%s: unable to map resource", np->name);
279
280 /* register pll */
281 rate = (ioread32(base + HW_SYSPLLCTRL) & 0xffff) * 1000000;
282
283 ref_clk = of_clk_get_parent_name(np, 0);
284 accuracy = clk_get_accuracy(__clk_lookup(ref_clk));
285 clk = clk_register_fixed_rate_with_accuracy(NULL, pll_clk,
286 ref_clk, 0, rate, accuracy);
287
288 if (IS_ERR(clk))
289 panic("%s: can't register REFCLK. Check DT!", np->name);
290
291 for (n = 0; n < ARRAY_SIZE(asm9260_mux_clks); n++) {
292 const struct asm9260_mux_clock *mc = &asm9260_mux_clks[n];
293
294 mc->parent_names[0] = ref_clk;
295 mc->parent_names[1] = pll_clk;
296 clk = clk_register_mux_table(NULL, mc->name, mc->parent_names,
297 mc->num_parents, mc->flags, base + mc->offset,
298 0, mc->mask, 0, mc->table, &asm9260_clk_lock);
299 }
300
301 /* clock mux gate cells */
302 for (n = 0; n < ARRAY_SIZE(asm9260_mux_gates); n++) {
303 const struct asm9260_gate_data *gd = &asm9260_mux_gates[n];
304
305 clk = clk_register_gate(NULL, gd->name,
306 gd->parent_name, gd->flags | CLK_SET_RATE_PARENT,
307 base + gd->reg, gd->bit_idx, 0, &asm9260_clk_lock);
308 }
309
310 /* clock div cells */
311 for (n = 0; n < ARRAY_SIZE(asm9260_div_clks); n++) {
312 const struct asm9260_div_clk *dc = &asm9260_div_clks[n];
313
314 clks[dc->idx] = clk_register_divider(NULL, dc->name,
315 dc->parent_name, CLK_SET_RATE_PARENT,
316 base + dc->reg, 0, 8, CLK_DIVIDER_ONE_BASED,
317 &asm9260_clk_lock);
318 }
319
320 /* clock ahb gate cells */
321 for (n = 0; n < ARRAY_SIZE(asm9260_ahb_gates); n++) {
322 const struct asm9260_gate_data *gd = &asm9260_ahb_gates[n];
323
324 clks[gd->idx] = clk_register_gate(NULL, gd->name,
325 gd->parent_name, gd->flags, base + gd->reg,
326 gd->bit_idx, 0, &asm9260_clk_lock);
327 }
328
329 /* check for errors on leaf clocks */
330 for (n = 0; n < MAX_CLKS; n++) {
331 if (!IS_ERR(clks[n]))
332 continue;
333
334 pr_err("%s: Unable to register leaf clock %d\n",
335 np->full_name, n);
336 goto fail;
337 }
338
339 /* register clk-provider */
340 clk_data.clks = clks;
341 clk_data.clk_num = MAX_CLKS;
342 of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
343 return;
344fail:
345 iounmap(base);
346}
347CLK_OF_DECLARE(asm9260_acc, "alphascale,asm9260-clock-controller",
348 asm9260_acc_init);
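One caveat in the new driver: of_io_request_and_map() reports failure with an ERR_PTR(), not NULL, so the `if (!base)` test in asm9260_acc_init() above will not catch a failed mapping. A stricter sketch of that check:

	base = of_io_request_and_map(np, 0, np->name);
	if (IS_ERR(base))
		panic("%s: unable to map resource", np->name);
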
diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c
new file mode 100644
index 000000000000..c386ad25beb4
--- /dev/null
+++ b/drivers/clk/clk-cdce706.c
@@ -0,0 +1,700 @@
1/*
2 * TI CDCE706 programmable 3-PLL clock synthesizer driver
3 *
4 * Copyright (c) 2014 Cadence Design Systems Inc.
5 *
6 * Reference: http://www.ti.com/lit/ds/symlink/cdce706.pdf
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/clk-provider.h>
14#include <linux/delay.h>
15#include <linux/i2c.h>
16#include <linux/interrupt.h>
17#include <linux/mod_devicetable.h>
18#include <linux/module.h>
19#include <linux/of.h>
20#include <linux/rational.h>
21#include <linux/regmap.h>
22#include <linux/slab.h>
23
24#define CDCE706_CLKIN_CLOCK 10
25#define CDCE706_CLKIN_SOURCE 11
26#define CDCE706_PLL_M_LOW(pll) (1 + 3 * (pll))
27#define CDCE706_PLL_N_LOW(pll) (2 + 3 * (pll))
28#define CDCE706_PLL_HI(pll) (3 + 3 * (pll))
29#define CDCE706_PLL_MUX 3
30#define CDCE706_PLL_FVCO 6
31#define CDCE706_DIVIDER(div) (13 + (div))
32#define CDCE706_CLKOUT(out) (19 + (out))
33
34#define CDCE706_CLKIN_CLOCK_MASK 0x10
35#define CDCE706_CLKIN_SOURCE_SHIFT 6
36#define CDCE706_CLKIN_SOURCE_MASK 0xc0
37#define CDCE706_CLKIN_SOURCE_LVCMOS 0x40
38
39#define CDCE706_PLL_MUX_MASK(pll) (0x80 >> (pll))
40#define CDCE706_PLL_LOW_M_MASK 0xff
41#define CDCE706_PLL_LOW_N_MASK 0xff
42#define CDCE706_PLL_HI_M_MASK 0x1
43#define CDCE706_PLL_HI_N_MASK 0x1e
44#define CDCE706_PLL_HI_N_SHIFT 1
45#define CDCE706_PLL_M_MAX 0x1ff
46#define CDCE706_PLL_N_MAX 0xfff
47#define CDCE706_PLL_FVCO_MASK(pll) (0x80 >> (pll))
48#define CDCE706_PLL_FREQ_MIN 80000000
49#define CDCE706_PLL_FREQ_MAX 300000000
50#define CDCE706_PLL_FREQ_HI 180000000
51
52#define CDCE706_DIVIDER_PLL(div) (9 + (div) - ((div) > 2) - ((div) > 4))
53#define CDCE706_DIVIDER_PLL_SHIFT(div) ((div) < 2 ? 5 : 3 * ((div) & 1))
54#define CDCE706_DIVIDER_PLL_MASK(div) (0x7 << CDCE706_DIVIDER_PLL_SHIFT(div))
55#define CDCE706_DIVIDER_DIVIDER_MASK 0x7f
56#define CDCE706_DIVIDER_DIVIDER_MAX 0x7f
57
58#define CDCE706_CLKOUT_DIVIDER_MASK 0x7
59#define CDCE706_CLKOUT_ENABLE_MASK 0x8
60
61static struct regmap_config cdce706_regmap_config = {
62 .reg_bits = 8,
63 .val_bits = 8,
64 .val_format_endian = REGMAP_ENDIAN_NATIVE,
65};
66
67#define to_hw_data(phw) (container_of((phw), struct cdce706_hw_data, hw))
68
69struct cdce706_hw_data {
70 struct cdce706_dev_data *dev_data;
71 unsigned idx;
72 unsigned parent;
73 struct clk *clk;
74 struct clk_hw hw;
75 unsigned div;
76 unsigned mul;
77 unsigned mux;
78};
79
80struct cdce706_dev_data {
81 struct i2c_client *client;
82 struct regmap *regmap;
83 struct clk_onecell_data onecell;
84 struct clk *clks[6];
85 struct clk *clkin_clk[2];
86 const char *clkin_name[2];
87 struct cdce706_hw_data clkin[1];
88 struct cdce706_hw_data pll[3];
89 struct cdce706_hw_data divider[6];
90 struct cdce706_hw_data clkout[6];
91};
92
93static const char * const cdce706_source_name[] = {
94 "clk_in0", "clk_in1",
95};
96
97static const char *cdce706_clkin_name[] = {
98 "clk_in",
99};
100
101static const char * const cdce706_pll_name[] = {
102 "pll1", "pll2", "pll3",
103};
104
105static const char *cdce706_divider_parent_name[] = {
106 "clk_in", "pll1", "pll2", "pll2", "pll3",
107};
108
109static const char *cdce706_divider_name[] = {
110 "p0", "p1", "p2", "p3", "p4", "p5",
111};
112
113static const char * const cdce706_clkout_name[] = {
114 "clk_out0", "clk_out1", "clk_out2", "clk_out3", "clk_out4", "clk_out5",
115};
116
117static int cdce706_reg_read(struct cdce706_dev_data *dev_data, unsigned reg,
118 unsigned *val)
119{
120 int rc = regmap_read(dev_data->regmap, reg | 0x80, val);
121
122 if (rc < 0)
123 dev_err(&dev_data->client->dev, "error reading reg %u", reg);
124 return rc;
125}
126
127static int cdce706_reg_write(struct cdce706_dev_data *dev_data, unsigned reg,
128 unsigned val)
129{
130 int rc = regmap_write(dev_data->regmap, reg | 0x80, val);
131
132 if (rc < 0)
133 dev_err(&dev_data->client->dev, "error writing reg %u", reg);
134 return rc;
135}
136
137static int cdce706_reg_update(struct cdce706_dev_data *dev_data, unsigned reg,
138 unsigned mask, unsigned val)
139{
140 int rc = regmap_update_bits(dev_data->regmap, reg | 0x80, mask, val);
141
142 if (rc < 0)
143 dev_err(&dev_data->client->dev, "error updating reg %u", reg);
144 return rc;
145}
146
147static int cdce706_clkin_set_parent(struct clk_hw *hw, u8 index)
148{
149 struct cdce706_hw_data *hwd = to_hw_data(hw);
150
151 hwd->parent = index;
152 return 0;
153}
154
155static u8 cdce706_clkin_get_parent(struct clk_hw *hw)
156{
157 struct cdce706_hw_data *hwd = to_hw_data(hw);
158
159 return hwd->parent;
160}
161
162static const struct clk_ops cdce706_clkin_ops = {
163 .set_parent = cdce706_clkin_set_parent,
164 .get_parent = cdce706_clkin_get_parent,
165};
166
167static unsigned long cdce706_pll_recalc_rate(struct clk_hw *hw,
168 unsigned long parent_rate)
169{
170 struct cdce706_hw_data *hwd = to_hw_data(hw);
171
172 dev_dbg(&hwd->dev_data->client->dev,
173 "%s, pll: %d, mux: %d, mul: %u, div: %u\n",
174 __func__, hwd->idx, hwd->mux, hwd->mul, hwd->div);
175
176 if (!hwd->mux) {
177 if (hwd->div && hwd->mul) {
178 u64 res = (u64)parent_rate * hwd->mul;
179
180 do_div(res, hwd->div);
181 return res;
182 }
183 } else {
184 if (hwd->div)
185 return parent_rate / hwd->div;
186 }
187 return 0;
188}
189
190static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
191 unsigned long *parent_rate)
192{
193 struct cdce706_hw_data *hwd = to_hw_data(hw);
194 unsigned long mul, div;
195 u64 res;
196
197 dev_dbg(&hwd->dev_data->client->dev,
198 "%s, rate: %lu, parent_rate: %lu\n",
199 __func__, rate, *parent_rate);
200
201 rational_best_approximation(rate, *parent_rate,
202 CDCE706_PLL_N_MAX, CDCE706_PLL_M_MAX,
203 &mul, &div);
204 hwd->mul = mul;
205 hwd->div = div;
206
207 dev_dbg(&hwd->dev_data->client->dev,
208 "%s, pll: %d, mul: %lu, div: %lu\n",
209 __func__, hwd->idx, mul, div);
210
211 res = (u64)*parent_rate * hwd->mul;
212 do_div(res, hwd->div);
213 return res;
214}
215
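cdce706_pll_round_rate() above leans on rational_best_approximation() from lib/rational.c (hence the `select RATIONAL` in the Kconfig hunk earlier in this patch): given a target ratio it finds the closest N/M within the hardware's register limits. A sketch of the resulting PLL rate computation:

	static unsigned long sketch_pll_rate(unsigned long rate,
					     unsigned long parent_rate)
	{
		unsigned long n, m;
		u64 out;

		rational_best_approximation(rate, parent_rate,
					    CDCE706_PLL_N_MAX,
					    CDCE706_PLL_M_MAX, &n, &m);
		out = (u64)parent_rate * n;
		do_div(out, m);		/* fvco = parent * N / M */
		return out;
	}
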
216static int cdce706_pll_set_rate(struct clk_hw *hw, unsigned long rate,
217 unsigned long parent_rate)
218{
219 struct cdce706_hw_data *hwd = to_hw_data(hw);
220 unsigned long mul = hwd->mul, div = hwd->div;
221 int err;
222
223 dev_dbg(&hwd->dev_data->client->dev,
224 "%s, pll: %d, mul: %lu, div: %lu\n",
225 __func__, hwd->idx, mul, div);
226
227 err = cdce706_reg_update(hwd->dev_data,
228 CDCE706_PLL_HI(hwd->idx),
229 CDCE706_PLL_HI_M_MASK | CDCE706_PLL_HI_N_MASK,
230 ((div >> 8) & CDCE706_PLL_HI_M_MASK) |
231 ((mul >> (8 - CDCE706_PLL_HI_N_SHIFT)) &
232 CDCE706_PLL_HI_N_MASK));
233 if (err < 0)
234 return err;
235
236 err = cdce706_reg_write(hwd->dev_data,
237 CDCE706_PLL_M_LOW(hwd->idx),
238 div & CDCE706_PLL_LOW_M_MASK);
239 if (err < 0)
240 return err;
241
242 err = cdce706_reg_write(hwd->dev_data,
243 CDCE706_PLL_N_LOW(hwd->idx),
244 mul & CDCE706_PLL_LOW_N_MASK);
245 if (err < 0)
246 return err;
247
248 err = cdce706_reg_update(hwd->dev_data,
249 CDCE706_PLL_FVCO,
250 CDCE706_PLL_FVCO_MASK(hwd->idx),
251 rate > CDCE706_PLL_FREQ_HI ?
252 CDCE706_PLL_FVCO_MASK(hwd->idx) : 0);
253 return err;
254}
255
256static const struct clk_ops cdce706_pll_ops = {
257 .recalc_rate = cdce706_pll_recalc_rate,
258 .round_rate = cdce706_pll_round_rate,
259 .set_rate = cdce706_pll_set_rate,
260};
261
262static int cdce706_divider_set_parent(struct clk_hw *hw, u8 index)
263{
264 struct cdce706_hw_data *hwd = to_hw_data(hw);
265
266 if (hwd->parent == index)
267 return 0;
268 hwd->parent = index;
269 return cdce706_reg_update(hwd->dev_data,
270 CDCE706_DIVIDER_PLL(hwd->idx),
271 CDCE706_DIVIDER_PLL_MASK(hwd->idx),
272 index << CDCE706_DIVIDER_PLL_SHIFT(hwd->idx));
273}
274
275static u8 cdce706_divider_get_parent(struct clk_hw *hw)
276{
277 struct cdce706_hw_data *hwd = to_hw_data(hw);
278
279 return hwd->parent;
280}
281
282static unsigned long cdce706_divider_recalc_rate(struct clk_hw *hw,
283 unsigned long parent_rate)
284{
285 struct cdce706_hw_data *hwd = to_hw_data(hw);
286
287 dev_dbg(&hwd->dev_data->client->dev,
288 "%s, divider: %d, div: %u\n",
289 __func__, hwd->idx, hwd->div);
290 if (hwd->div)
291 return parent_rate / hwd->div;
292 return 0;
293}
294
295static long cdce706_divider_round_rate(struct clk_hw *hw, unsigned long rate,
296 unsigned long *parent_rate)
297{
298 struct cdce706_hw_data *hwd = to_hw_data(hw);
299 struct cdce706_dev_data *cdce = hwd->dev_data;
300 unsigned long mul, div;
301
302 dev_dbg(&hwd->dev_data->client->dev,
303 "%s, rate: %lu, parent_rate: %lu\n",
304 __func__, rate, *parent_rate);
305
306 rational_best_approximation(rate, *parent_rate,
307 1, CDCE706_DIVIDER_DIVIDER_MAX,
308 &mul, &div);
309 if (!mul)
310 div = CDCE706_DIVIDER_DIVIDER_MAX;
311
312 if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
313 unsigned long best_diff = rate;
314 unsigned long best_div = 0;
315 struct clk *gp_clk = cdce->clkin_clk[cdce->clkin[0].parent];
316 unsigned long gp_rate = gp_clk ? clk_get_rate(gp_clk) : 0;
317
318 for (div = CDCE706_PLL_FREQ_MIN / rate; best_diff &&
319 div <= CDCE706_PLL_FREQ_MAX / rate; ++div) {
320 unsigned long n, m;
321 unsigned long diff;
322 unsigned long div_rate;
323 u64 div_rate64;
324
325 if (rate * div < CDCE706_PLL_FREQ_MIN)
326 continue;
327
328 rational_best_approximation(rate * div, gp_rate,
329 CDCE706_PLL_N_MAX,
330 CDCE706_PLL_M_MAX,
331 &n, &m);
332 div_rate64 = (u64)gp_rate * n;
333 do_div(div_rate64, m);
334 do_div(div_rate64, div);
335 div_rate = div_rate64;
336 diff = max(div_rate, rate) - min(div_rate, rate);
337
338 if (diff < best_diff) {
339 best_diff = diff;
340 best_div = div;
341 dev_dbg(&hwd->dev_data->client->dev,
342 "%s, %lu * %lu / %lu / %lu = %lu\n",
343 __func__, gp_rate, n, m, div, div_rate);
344 }
345 }
346
347 div = best_div;
348
349 dev_dbg(&hwd->dev_data->client->dev,
350 "%s, altering parent rate: %lu -> %lu\n",
351 __func__, *parent_rate, rate * div);
352 *parent_rate = rate * div;
353 }
354 hwd->div = div;
355
356 dev_dbg(&hwd->dev_data->client->dev,
357 "%s, divider: %d, div: %lu\n",
358 __func__, hwd->idx, div);
359
360 return *parent_rate / div;
361}
362
363static int cdce706_divider_set_rate(struct clk_hw *hw, unsigned long rate,
364 unsigned long parent_rate)
365{
366 struct cdce706_hw_data *hwd = to_hw_data(hw);
367
368 dev_dbg(&hwd->dev_data->client->dev,
369 "%s, divider: %d, div: %u\n",
370 __func__, hwd->idx, hwd->div);
371
372 return cdce706_reg_update(hwd->dev_data,
373 CDCE706_DIVIDER(hwd->idx),
374 CDCE706_DIVIDER_DIVIDER_MASK,
375 hwd->div);
376}
377
378static const struct clk_ops cdce706_divider_ops = {
379 .set_parent = cdce706_divider_set_parent,
380 .get_parent = cdce706_divider_get_parent,
381 .recalc_rate = cdce706_divider_recalc_rate,
382 .round_rate = cdce706_divider_round_rate,
383 .set_rate = cdce706_divider_set_rate,
384};
385
386static int cdce706_clkout_prepare(struct clk_hw *hw)
387{
388 struct cdce706_hw_data *hwd = to_hw_data(hw);
389
390 return cdce706_reg_update(hwd->dev_data, CDCE706_CLKOUT(hwd->idx),
391 CDCE706_CLKOUT_ENABLE_MASK,
392 CDCE706_CLKOUT_ENABLE_MASK);
393}
394
395static void cdce706_clkout_unprepare(struct clk_hw *hw)
396{
397 struct cdce706_hw_data *hwd = to_hw_data(hw);
398
399 cdce706_reg_update(hwd->dev_data, CDCE706_CLKOUT(hwd->idx),
400 CDCE706_CLKOUT_ENABLE_MASK, 0);
401}
402
403static int cdce706_clkout_set_parent(struct clk_hw *hw, u8 index)
404{
405 struct cdce706_hw_data *hwd = to_hw_data(hw);
406
407 if (hwd->parent == index)
408 return 0;
409 hwd->parent = index;
410 return cdce706_reg_update(hwd->dev_data,
411 CDCE706_CLKOUT(hwd->idx),
412 CDCE706_CLKOUT_ENABLE_MASK, index);
413}
414
415static u8 cdce706_clkout_get_parent(struct clk_hw *hw)
416{
417 struct cdce706_hw_data *hwd = to_hw_data(hw);
418
419 return hwd->parent;
420}
421
422static unsigned long cdce706_clkout_recalc_rate(struct clk_hw *hw,
423 unsigned long parent_rate)
424{
425 return parent_rate;
426}
427
428static long cdce706_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
429 unsigned long *parent_rate)
430{
431 *parent_rate = rate;
432 return rate;
433}
434
435static int cdce706_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
436 unsigned long parent_rate)
437{
438 return 0;
439}
440
441static const struct clk_ops cdce706_clkout_ops = {
442 .prepare = cdce706_clkout_prepare,
443 .unprepare = cdce706_clkout_unprepare,
444 .set_parent = cdce706_clkout_set_parent,
445 .get_parent = cdce706_clkout_get_parent,
446 .recalc_rate = cdce706_clkout_recalc_rate,
447 .round_rate = cdce706_clkout_round_rate,
448 .set_rate = cdce706_clkout_set_rate,
449};
450
451static int cdce706_register_hw(struct cdce706_dev_data *cdce,
452 struct cdce706_hw_data *hw, unsigned num_hw,
453 const char * const *clk_names,
454 struct clk_init_data *init)
455{
456 unsigned i;
457
458 for (i = 0; i < num_hw; ++i, ++hw) {
459 init->name = clk_names[i];
460 hw->dev_data = cdce;
461 hw->idx = i;
462 hw->hw.init = init;
463 hw->clk = devm_clk_register(&cdce->client->dev,
464 &hw->hw);
465 if (IS_ERR(hw->clk)) {
466 dev_err(&cdce->client->dev, "Failed to register %s\n",
467 clk_names[i]);
468 return PTR_ERR(hw->clk);
469 }
470 }
471 return 0;
472}
473
474static int cdce706_register_clkin(struct cdce706_dev_data *cdce)
475{
476 struct clk_init_data init = {
477 .ops = &cdce706_clkin_ops,
478 .parent_names = cdce->clkin_name,
479 .num_parents = ARRAY_SIZE(cdce->clkin_name),
480 };
481 unsigned i;
482 int ret;
483 unsigned clock, source;
484
485 for (i = 0; i < ARRAY_SIZE(cdce->clkin_name); ++i) {
486 struct clk *parent = devm_clk_get(&cdce->client->dev,
487 cdce706_source_name[i]);
488
489 if (IS_ERR(parent)) {
490 cdce->clkin_name[i] = cdce706_source_name[i];
491 } else {
492 cdce->clkin_name[i] = __clk_get_name(parent);
493 cdce->clkin_clk[i] = parent;
494 }
495 }
496
497 ret = cdce706_reg_read(cdce, CDCE706_CLKIN_SOURCE, &source);
498 if (ret < 0)
499 return ret;
500 if ((source & CDCE706_CLKIN_SOURCE_MASK) ==
501 CDCE706_CLKIN_SOURCE_LVCMOS) {
502 ret = cdce706_reg_read(cdce, CDCE706_CLKIN_CLOCK, &clock);
503 if (ret < 0)
504 return ret;
505 cdce->clkin[0].parent = !!(clock & CDCE706_CLKIN_CLOCK_MASK);
506 }
507
508 ret = cdce706_register_hw(cdce, cdce->clkin,
509 ARRAY_SIZE(cdce->clkin),
510 cdce706_clkin_name, &init);
511 return ret;
512}
513
514static int cdce706_register_plls(struct cdce706_dev_data *cdce)
515{
516 struct clk_init_data init = {
517 .ops = &cdce706_pll_ops,
518 .parent_names = cdce706_clkin_name,
519 .num_parents = ARRAY_SIZE(cdce706_clkin_name),
520 };
521 unsigned i;
522 int ret;
523 unsigned mux;
524
525 ret = cdce706_reg_read(cdce, CDCE706_PLL_MUX, &mux);
526 if (ret < 0)
527 return ret;
528
529 for (i = 0; i < ARRAY_SIZE(cdce->pll); ++i) {
530 unsigned m, n, v;
531
532 ret = cdce706_reg_read(cdce, CDCE706_PLL_M_LOW(i), &m);
533 if (ret < 0)
534 return ret;
535 ret = cdce706_reg_read(cdce, CDCE706_PLL_N_LOW(i), &n);
536 if (ret < 0)
537 return ret;
538 ret = cdce706_reg_read(cdce, CDCE706_PLL_HI(i), &v);
539 if (ret < 0)
540 return ret;
541 cdce->pll[i].div = m | ((v & CDCE706_PLL_HI_M_MASK) << 8);
542 cdce->pll[i].mul = n | ((v & CDCE706_PLL_HI_N_MASK) <<
543 (8 - CDCE706_PLL_HI_N_SHIFT));
544 cdce->pll[i].mux = mux & CDCE706_PLL_MUX_MASK(i);
545 dev_dbg(&cdce->client->dev,
546 "%s: i: %u, div: %u, mul: %u, mux: %d\n", __func__, i,
547 cdce->pll[i].div, cdce->pll[i].mul, cdce->pll[i].mux);
548 }
549
550 ret = cdce706_register_hw(cdce, cdce->pll,
551 ARRAY_SIZE(cdce->pll),
552 cdce706_pll_name, &init);
553 return ret;
554}
555
556static int cdce706_register_dividers(struct cdce706_dev_data *cdce)
557{
558 struct clk_init_data init = {
559 .ops = &cdce706_divider_ops,
560 .parent_names = cdce706_divider_parent_name,
561 .num_parents = ARRAY_SIZE(cdce706_divider_parent_name),
562 .flags = CLK_SET_RATE_PARENT,
563 };
564 unsigned i;
565 int ret;
566
567 for (i = 0; i < ARRAY_SIZE(cdce->divider); ++i) {
568 unsigned val;
569
570 ret = cdce706_reg_read(cdce, CDCE706_DIVIDER_PLL(i), &val);
571 if (ret < 0)
572 return ret;
573 cdce->divider[i].parent =
574 (val & CDCE706_DIVIDER_PLL_MASK(i)) >>
575 CDCE706_DIVIDER_PLL_SHIFT(i);
576
577 ret = cdce706_reg_read(cdce, CDCE706_DIVIDER(i), &val);
578 if (ret < 0)
579 return ret;
580 cdce->divider[i].div = val & CDCE706_DIVIDER_DIVIDER_MASK;
581 dev_dbg(&cdce->client->dev,
582 "%s: i: %u, parent: %u, div: %u\n", __func__, i,
583 cdce->divider[i].parent, cdce->divider[i].div);
584 }
585
586 ret = cdce706_register_hw(cdce, cdce->divider,
587 ARRAY_SIZE(cdce->divider),
588 cdce706_divider_name, &init);
589 return ret;
590}
591
592static int cdce706_register_clkouts(struct cdce706_dev_data *cdce)
593{
594 struct clk_init_data init = {
595 .ops = &cdce706_clkout_ops,
596 .parent_names = cdce706_divider_name,
597 .num_parents = ARRAY_SIZE(cdce706_divider_name),
598 .flags = CLK_SET_RATE_PARENT,
599 };
600 unsigned i;
601 int ret;
602
603 for (i = 0; i < ARRAY_SIZE(cdce->clkout); ++i) {
604 unsigned val;
605
606 ret = cdce706_reg_read(cdce, CDCE706_CLKOUT(i), &val);
607 if (ret < 0)
608 return ret;
609 cdce->clkout[i].parent = val & CDCE706_CLKOUT_DIVIDER_MASK;
610 dev_dbg(&cdce->client->dev,
611 "%s: i: %u, parent: %u\n", __func__, i,
612 cdce->clkout[i].parent);
613 }
614
615 ret = cdce706_register_hw(cdce, cdce->clkout,
616 ARRAY_SIZE(cdce->clkout),
617 cdce706_clkout_name, &init);
618 for (i = 0; i < ARRAY_SIZE(cdce->clkout); ++i)
619 cdce->clks[i] = cdce->clkout[i].clk;
620
621 return ret;
622}
623
624static int cdce706_probe(struct i2c_client *client,
625 const struct i2c_device_id *id)
626{
627 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
628 struct cdce706_dev_data *cdce;
629 int ret;
630
631 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
632 return -EIO;
633
634 cdce = devm_kzalloc(&client->dev, sizeof(*cdce), GFP_KERNEL);
635 if (!cdce)
636 return -ENOMEM;
637
638 cdce->client = client;
639 cdce->regmap = devm_regmap_init_i2c(client, &cdce706_regmap_config);
640 if (IS_ERR(cdce->regmap)) {
641 dev_err(&client->dev, "Failed to initialize regmap\n");
642 return -EINVAL;
643 }
644
645 i2c_set_clientdata(client, cdce);
646
647 ret = cdce706_register_clkin(cdce);
648 if (ret < 0)
649 return ret;
650 ret = cdce706_register_plls(cdce);
651 if (ret < 0)
652 return ret;
653 ret = cdce706_register_dividers(cdce);
654 if (ret < 0)
655 return ret;
656 ret = cdce706_register_clkouts(cdce);
657 if (ret < 0)
658 return ret;
659 cdce->onecell.clks = cdce->clks;
660 cdce->onecell.clk_num = ARRAY_SIZE(cdce->clks);
661 ret = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get,
662 &cdce->onecell);
663
664 return ret;
665}
666
667static int cdce706_remove(struct i2c_client *client)
668{
669 return 0;
670}
671
672
673#ifdef CONFIG_OF
674static const struct of_device_id cdce706_dt_match[] = {
675 { .compatible = "ti,cdce706" },
676 { },
677};
678MODULE_DEVICE_TABLE(of, cdce706_dt_match);
679#endif
680
681static const struct i2c_device_id cdce706_id[] = {
682 { "cdce706", 0 },
683 { }
684};
685MODULE_DEVICE_TABLE(i2c, cdce706_id);
686
687static struct i2c_driver cdce706_i2c_driver = {
688 .driver = {
689 .name = "cdce706",
690 .of_match_table = of_match_ptr(cdce706_dt_match),
691 },
692 .probe = cdce706_probe,
693 .remove = cdce706_remove,
694 .id_table = cdce706_id,
695};
696module_i2c_driver(cdce706_i2c_driver);
697
698MODULE_AUTHOR("Max Filippov <jcmvbkbc@gmail.com>");
699MODULE_DESCRIPTION("TI CDCE 706 clock synthesizer driver");
700MODULE_LICENSE("GPL");
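
The PLL ops above compute their rate as parent_rate * mul / div, with rational_best_approximation() choosing mul and div within the part's N/M limits; the divider's round_rate then re-runs that search once per candidate post-divider when CLK_SET_RATE_PARENT is set. A small stand-alone sketch of the selection step; the limits and the brute-force search are illustrative stand-ins for the kernel's rational_best_approximation(), not the driver's actual code:

	#include <stdint.h>
	#include <stdio.h>

	#define PLL_N_MAX 4095	/* stand-in for CDCE706_PLL_N_MAX */
	#define PLL_M_MAX 511	/* stand-in for CDCE706_PLL_M_MAX */

	/* Pick mul/div so that parent * mul / div is closest to rate. */
	static void best_ratio(unsigned long rate, unsigned long parent,
			       unsigned long *mul, unsigned long *div)
	{
		unsigned long best_err = ~0UL;

		for (unsigned long m = 1; m <= PLL_M_MAX; m++) {
			/* nearest multiplier for this divider */
			unsigned long n = ((uint64_t)rate * m + parent / 2) / parent;
			if (n < 1 || n > PLL_N_MAX)
				continue;
			unsigned long got = (uint64_t)parent * n / m;
			unsigned long err = got > rate ? got - rate : rate - got;
			if (err < best_err) {
				best_err = err;
				*mul = n;
				*div = m;
			}
		}
	}

	int main(void)
	{
		unsigned long mul = 1, div = 1;

		best_ratio(100000000, 27000000, &mul, &div);	/* 27 MHz in, 100 MHz out */
		printf("mul=%lu div=%lu -> %llu Hz\n", mul, div,
		       (unsigned long long)27000000 * mul / div);
		return 0;
	}
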
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 4386697236a7..956b7e54fa1c 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -27,7 +27,7 @@ static u8 clk_composite_get_parent(struct clk_hw *hw)
 	const struct clk_ops *mux_ops = composite->mux_ops;
 	struct clk_hw *mux_hw = composite->mux_hw;
 
-	mux_hw->clk = hw->clk;
+	__clk_hw_set_clk(mux_hw, hw);
 
 	return mux_ops->get_parent(mux_hw);
 }
@@ -38,7 +38,7 @@ static int clk_composite_set_parent(struct clk_hw *hw, u8 index)
 	const struct clk_ops *mux_ops = composite->mux_ops;
 	struct clk_hw *mux_hw = composite->mux_hw;
 
-	mux_hw->clk = hw->clk;
+	__clk_hw_set_clk(mux_hw, hw);
 
 	return mux_ops->set_parent(mux_hw, index);
 }
@@ -50,12 +50,14 @@ static unsigned long clk_composite_recalc_rate(struct clk_hw *hw,
 	const struct clk_ops *rate_ops = composite->rate_ops;
 	struct clk_hw *rate_hw = composite->rate_hw;
 
-	rate_hw->clk = hw->clk;
+	__clk_hw_set_clk(rate_hw, hw);
 
 	return rate_ops->recalc_rate(rate_hw, parent_rate);
 }
 
 static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
+					 unsigned long min_rate,
+					 unsigned long max_rate,
 					 unsigned long *best_parent_rate,
 					 struct clk_hw **best_parent_p)
 {
@@ -72,8 +74,10 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
 	int i;
 
 	if (rate_hw && rate_ops && rate_ops->determine_rate) {
-		rate_hw->clk = hw->clk;
-		return rate_ops->determine_rate(rate_hw, rate, best_parent_rate,
+		__clk_hw_set_clk(rate_hw, hw);
+		return rate_ops->determine_rate(rate_hw, rate, min_rate,
+						max_rate,
+						best_parent_rate,
 						best_parent_p);
 	} else if (rate_hw && rate_ops && rate_ops->round_rate &&
 		   mux_hw && mux_ops && mux_ops->set_parent) {
@@ -116,8 +120,9 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
 
 		return best_rate;
 	} else if (mux_hw && mux_ops && mux_ops->determine_rate) {
-		mux_hw->clk = hw->clk;
-		return mux_ops->determine_rate(mux_hw, rate, best_parent_rate,
+		__clk_hw_set_clk(mux_hw, hw);
+		return mux_ops->determine_rate(mux_hw, rate, min_rate,
+					       max_rate, best_parent_rate,
 					       best_parent_p);
 	} else {
 		pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n");
@@ -132,7 +137,7 @@ static long clk_composite_round_rate(struct clk_hw *hw, unsigned long rate,
 	const struct clk_ops *rate_ops = composite->rate_ops;
 	struct clk_hw *rate_hw = composite->rate_hw;
 
-	rate_hw->clk = hw->clk;
+	__clk_hw_set_clk(rate_hw, hw);
 
 	return rate_ops->round_rate(rate_hw, rate, prate);
 }
@@ -144,7 +149,7 @@ static int clk_composite_set_rate(struct clk_hw *hw, unsigned long rate,
 	const struct clk_ops *rate_ops = composite->rate_ops;
 	struct clk_hw *rate_hw = composite->rate_hw;
 
-	rate_hw->clk = hw->clk;
+	__clk_hw_set_clk(rate_hw, hw);
 
 	return rate_ops->set_rate(rate_hw, rate, parent_rate);
 }
@@ -155,7 +160,7 @@ static int clk_composite_is_enabled(struct clk_hw *hw)
 	const struct clk_ops *gate_ops = composite->gate_ops;
 	struct clk_hw *gate_hw = composite->gate_hw;
 
-	gate_hw->clk = hw->clk;
+	__clk_hw_set_clk(gate_hw, hw);
 
 	return gate_ops->is_enabled(gate_hw);
 }
@@ -166,7 +171,7 @@ static int clk_composite_enable(struct clk_hw *hw)
 	const struct clk_ops *gate_ops = composite->gate_ops;
 	struct clk_hw *gate_hw = composite->gate_hw;
 
-	gate_hw->clk = hw->clk;
+	__clk_hw_set_clk(gate_hw, hw);
 
 	return gate_ops->enable(gate_hw);
 }
@@ -177,7 +182,7 @@ static void clk_composite_disable(struct clk_hw *hw)
 	const struct clk_ops *gate_ops = composite->gate_ops;
 	struct clk_hw *gate_hw = composite->gate_hw;
 
-	gate_hw->clk = hw->clk;
+	__clk_hw_set_clk(gate_hw, hw);
 
 	gate_ops->disable(gate_hw);
 }
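
Every hunk in this file swaps a bare "sub_hw->clk = hw->clk" copy for __clk_hw_set_clk(). With the clk/clk_core split that lands later in this merge (see the clk.c changes below), the borrowed mux/rate/gate ops need both pointers from the composite's clk_hw, not just the consumer handle. The helper this series adds to include/linux/clk-provider.h is essentially:

	static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
	{
		dst->clk = src->clk;	/* per-user consumer handle */
		dst->core = src->core;	/* shared clk_core instance */
	}
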
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index c0a842b335c5..db7f8bce7467 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -30,7 +30,7 @@
 
 #define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
 
-#define div_mask(d)	((1 << ((d)->width)) - 1)
+#define div_mask(width)	((1 << (width)) - 1)
 
 static unsigned int _get_table_maxdiv(const struct clk_div_table *table)
 {
@@ -54,15 +54,16 @@ static unsigned int _get_table_mindiv(const struct clk_div_table *table)
 	return mindiv;
 }
 
-static unsigned int _get_maxdiv(struct clk_divider *divider)
+static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width,
+				unsigned long flags)
 {
-	if (divider->flags & CLK_DIVIDER_ONE_BASED)
-		return div_mask(divider);
-	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
-		return 1 << div_mask(divider);
-	if (divider->table)
-		return _get_table_maxdiv(divider->table);
-	return div_mask(divider) + 1;
+	if (flags & CLK_DIVIDER_ONE_BASED)
+		return div_mask(width);
+	if (flags & CLK_DIVIDER_POWER_OF_TWO)
+		return 1 << div_mask(width);
+	if (table)
+		return _get_table_maxdiv(table);
+	return div_mask(width) + 1;
 }
 
 static unsigned int _get_table_div(const struct clk_div_table *table,
@@ -76,14 +77,15 @@ static unsigned int _get_table_div(const struct clk_div_table *table,
 	return 0;
 }
 
-static unsigned int _get_div(struct clk_divider *divider, unsigned int val)
+static unsigned int _get_div(const struct clk_div_table *table,
+			     unsigned int val, unsigned long flags)
 {
-	if (divider->flags & CLK_DIVIDER_ONE_BASED)
+	if (flags & CLK_DIVIDER_ONE_BASED)
 		return val;
-	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+	if (flags & CLK_DIVIDER_POWER_OF_TWO)
 		return 1 << val;
-	if (divider->table)
-		return _get_table_div(divider->table, val);
+	if (table)
+		return _get_table_div(table, val);
 	return val + 1;
 }
 
@@ -98,29 +100,28 @@ static unsigned int _get_table_val(const struct clk_div_table *table,
 	return 0;
 }
 
-static unsigned int _get_val(struct clk_divider *divider, unsigned int div)
+static unsigned int _get_val(const struct clk_div_table *table,
+			     unsigned int div, unsigned long flags)
 {
-	if (divider->flags & CLK_DIVIDER_ONE_BASED)
+	if (flags & CLK_DIVIDER_ONE_BASED)
 		return div;
-	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+	if (flags & CLK_DIVIDER_POWER_OF_TWO)
 		return __ffs(div);
-	if (divider->table)
-		return _get_table_val(divider->table, div);
+	if (table)
+		return _get_table_val(table, div);
 	return div - 1;
 }
 
-static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
-		unsigned long parent_rate)
+unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
+				  unsigned int val,
+				  const struct clk_div_table *table,
+				  unsigned long flags)
 {
-	struct clk_divider *divider = to_clk_divider(hw);
-	unsigned int div, val;
+	unsigned int div;
 
-	val = clk_readl(divider->reg) >> divider->shift;
-	val &= div_mask(divider);
-
-	div = _get_div(divider, val);
+	div = _get_div(table, val, flags);
 	if (!div) {
-		WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
+		WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO),
 			"%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
 			__clk_get_name(hw->clk));
 		return parent_rate;
@@ -128,6 +129,20 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
 
 	return DIV_ROUND_UP(parent_rate, div);
 }
+EXPORT_SYMBOL_GPL(divider_recalc_rate);
+
+static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	struct clk_divider *divider = to_clk_divider(hw);
+	unsigned int val;
+
+	val = clk_readl(divider->reg) >> divider->shift;
+	val &= div_mask(divider->width);
+
+	return divider_recalc_rate(hw, parent_rate, val, divider->table,
+				   divider->flags);
+}
 
 /*
  * The reverse of DIV_ROUND_UP: The maximum number which
@@ -146,12 +161,13 @@ static bool _is_valid_table_div(const struct clk_div_table *table,
 	return false;
 }
 
-static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
+static bool _is_valid_div(const struct clk_div_table *table, unsigned int div,
+			  unsigned long flags)
 {
-	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+	if (flags & CLK_DIVIDER_POWER_OF_TWO)
 		return is_power_of_2(div);
-	if (divider->table)
-		return _is_valid_table_div(divider->table, div);
+	if (table)
+		return _is_valid_table_div(table, div);
 	return true;
 }
 
@@ -191,71 +207,76 @@ static int _round_down_table(const struct clk_div_table *table, int div)
 	return down;
 }
 
-static int _div_round_up(struct clk_divider *divider,
-		unsigned long parent_rate, unsigned long rate)
+static int _div_round_up(const struct clk_div_table *table,
+			 unsigned long parent_rate, unsigned long rate,
+			 unsigned long flags)
 {
 	int div = DIV_ROUND_UP(parent_rate, rate);
 
-	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+	if (flags & CLK_DIVIDER_POWER_OF_TWO)
 		div = __roundup_pow_of_two(div);
-	if (divider->table)
-		div = _round_up_table(divider->table, div);
+	if (table)
+		div = _round_up_table(table, div);
 
 	return div;
 }
 
-static int _div_round_closest(struct clk_divider *divider,
-		unsigned long parent_rate, unsigned long rate)
+static int _div_round_closest(const struct clk_div_table *table,
+			      unsigned long parent_rate, unsigned long rate,
+			      unsigned long flags)
 {
 	int up, down, div;
 
 	up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate);
 
-	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) {
+	if (flags & CLK_DIVIDER_POWER_OF_TWO) {
 		up = __roundup_pow_of_two(div);
 		down = __rounddown_pow_of_two(div);
-	} else if (divider->table) {
-		up = _round_up_table(divider->table, div);
-		down = _round_down_table(divider->table, div);
+	} else if (table) {
+		up = _round_up_table(table, div);
+		down = _round_down_table(table, div);
 	}
 
 	return (up - div) <= (div - down) ? up : down;
 }
 
-static int _div_round(struct clk_divider *divider, unsigned long parent_rate,
-		unsigned long rate)
+static int _div_round(const struct clk_div_table *table,
+		      unsigned long parent_rate, unsigned long rate,
+		      unsigned long flags)
 {
-	if (divider->flags & CLK_DIVIDER_ROUND_CLOSEST)
-		return _div_round_closest(divider, parent_rate, rate);
+	if (flags & CLK_DIVIDER_ROUND_CLOSEST)
+		return _div_round_closest(table, parent_rate, rate, flags);
 
-	return _div_round_up(divider, parent_rate, rate);
+	return _div_round_up(table, parent_rate, rate, flags);
 }
 
-static bool _is_best_div(struct clk_divider *divider,
-		unsigned long rate, unsigned long now, unsigned long best)
+static bool _is_best_div(unsigned long rate, unsigned long now,
+			 unsigned long best, unsigned long flags)
 {
-	if (divider->flags & CLK_DIVIDER_ROUND_CLOSEST)
+	if (flags & CLK_DIVIDER_ROUND_CLOSEST)
 		return abs(rate - now) < abs(rate - best);
 
 	return now <= rate && now > best;
 }
 
-static int _next_div(struct clk_divider *divider, int div)
+static int _next_div(const struct clk_div_table *table, int div,
+		     unsigned long flags)
 {
 	div++;
 
-	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+	if (flags & CLK_DIVIDER_POWER_OF_TWO)
 		return __roundup_pow_of_two(div);
-	if (divider->table)
-		return _round_up_table(divider->table, div);
+	if (table)
+		return _round_up_table(table, div);
 
 	return div;
 }
 
 static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
-		unsigned long *best_parent_rate)
+			       unsigned long *best_parent_rate,
+			       const struct clk_div_table *table, u8 width,
+			       unsigned long flags)
 {
-	struct clk_divider *divider = to_clk_divider(hw);
 	int i, bestdiv = 0;
 	unsigned long parent_rate, best = 0, now, maxdiv;
 	unsigned long parent_rate_saved = *best_parent_rate;
@@ -263,19 +284,11 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
 	if (!rate)
 		rate = 1;
 
-	/* if read only, just return current value */
-	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
-		bestdiv = readl(divider->reg) >> divider->shift;
-		bestdiv &= div_mask(divider);
-		bestdiv = _get_div(divider, bestdiv);
-		return bestdiv;
-	}
-
-	maxdiv = _get_maxdiv(divider);
+	maxdiv = _get_maxdiv(table, width, flags);
 
 	if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
 		parent_rate = *best_parent_rate;
-		bestdiv = _div_round(divider, parent_rate, rate);
+		bestdiv = _div_round(table, parent_rate, rate, flags);
 		bestdiv = bestdiv == 0 ? 1 : bestdiv;
 		bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
 		return bestdiv;
@@ -287,8 +300,8 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
 	 */
 	maxdiv = min(ULONG_MAX / rate, maxdiv);
 
-	for (i = 1; i <= maxdiv; i = _next_div(divider, i)) {
-		if (!_is_valid_div(divider, i))
+	for (i = 1; i <= maxdiv; i = _next_div(table, i, flags)) {
+		if (!_is_valid_div(table, i, flags))
 			continue;
 		if (rate * i == parent_rate_saved) {
 			/*
@@ -302,7 +315,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
 		parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
 				MULT_ROUND_UP(rate, i));
 		now = DIV_ROUND_UP(parent_rate, i);
-		if (_is_best_div(divider, rate, now, best)) {
+		if (_is_best_div(rate, now, best, flags)) {
 			bestdiv = i;
 			best = now;
 			*best_parent_rate = parent_rate;
@@ -310,48 +323,79 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
 	}
 
 	if (!bestdiv) {
-		bestdiv = _get_maxdiv(divider);
+		bestdiv = _get_maxdiv(table, width, flags);
 		*best_parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 1);
 	}
 
 	return bestdiv;
 }
 
-static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
-		unsigned long *prate)
+long divider_round_rate(struct clk_hw *hw, unsigned long rate,
+			unsigned long *prate, const struct clk_div_table *table,
+			u8 width, unsigned long flags)
 {
 	int div;
-	div = clk_divider_bestdiv(hw, rate, prate);
+
+	div = clk_divider_bestdiv(hw, rate, prate, table, width, flags);
 
 	return DIV_ROUND_UP(*prate, div);
 }
+EXPORT_SYMBOL_GPL(divider_round_rate);
 
-static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
-				unsigned long parent_rate)
+static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long *prate)
 {
 	struct clk_divider *divider = to_clk_divider(hw);
+	int bestdiv;
+
+	/* if read only, just return current value */
+	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
+		bestdiv = readl(divider->reg) >> divider->shift;
+		bestdiv &= div_mask(divider->width);
+		bestdiv = _get_div(divider->table, bestdiv, divider->flags);
+		return bestdiv;
+	}
+
+	return divider_round_rate(hw, rate, prate, divider->table,
+				  divider->width, divider->flags);
+}
+
+int divider_get_val(unsigned long rate, unsigned long parent_rate,
+		    const struct clk_div_table *table, u8 width,
+		    unsigned long flags)
+{
 	unsigned int div, value;
-	unsigned long flags = 0;
-	u32 val;
 
 	div = DIV_ROUND_UP(parent_rate, rate);
 
-	if (!_is_valid_div(divider, div))
+	if (!_is_valid_div(table, div, flags))
 		return -EINVAL;
 
-	value = _get_val(divider, div);
+	value = _get_val(table, div, flags);
+
+	return min_t(unsigned int, value, div_mask(width));
+}
+EXPORT_SYMBOL_GPL(divider_get_val);
+
+static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long parent_rate)
+{
+	struct clk_divider *divider = to_clk_divider(hw);
+	unsigned int value;
+	unsigned long flags = 0;
+	u32 val;
 
-	if (value > div_mask(divider))
-		value = div_mask(divider);
+	value = divider_get_val(rate, parent_rate, divider->table,
+				divider->width, divider->flags);
 
 	if (divider->lock)
 		spin_lock_irqsave(divider->lock, flags);
 
 	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
-		val = div_mask(divider) << (divider->shift + 16);
+		val = div_mask(divider->width) << (divider->shift + 16);
 	} else {
 		val = clk_readl(divider->reg);
-		val &= ~(div_mask(divider) << divider->shift);
+		val &= ~(div_mask(divider->width) << divider->shift);
 	}
 	val |= value << divider->shift;
 	clk_writel(val, divider->reg);
@@ -463,3 +507,19 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
 		width, clk_divider_flags, table, lock);
 }
 EXPORT_SYMBOL_GPL(clk_register_divider_table);
+
+void clk_unregister_divider(struct clk *clk)
+{
+	struct clk_divider *div;
+	struct clk_hw *hw;
+
+	hw = __clk_get_hw(clk);
+	if (!hw)
+		return;
+
+	div = to_clk_divider(hw);
+
+	clk_unregister(clk);
+	kfree(div);
+}
+EXPORT_SYMBOL_GPL(clk_unregister_divider);
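
The point of threading table/width/flags through the helpers above is that divider_recalc_rate(), divider_round_rate() and divider_get_val() no longer assume a clk_readl()-backed struct clk_divider, so a driver with its own register access can reuse the rounding logic. A sketch of such reuse follows; my_div, my_regmap_read() and my_regmap_write() are hypothetical, only the divider_*() calls and their signatures come from this patch:

	struct my_div {
		struct clk_hw hw;
		unsigned int reg;
		u8 shift;
		u8 width;
	};

	#define to_my_div(_hw) container_of(_hw, struct my_div, hw)

	static unsigned long my_div_recalc_rate(struct clk_hw *hw,
						unsigned long parent_rate)
	{
		struct my_div *d = to_my_div(hw);
		unsigned int val = (my_regmap_read(d->reg) >> d->shift) &
				   ((1 << d->width) - 1);

		/* no table, no flags: plain "register value + 1" divider */
		return divider_recalc_rate(hw, parent_rate, val, NULL, 0);
	}

	static long my_div_round_rate(struct clk_hw *hw, unsigned long rate,
				      unsigned long *prate)
	{
		return divider_round_rate(hw, rate, prate, NULL,
					  to_my_div(hw)->width, 0);
	}

	static int my_div_set_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
	{
		struct my_div *d = to_my_div(hw);
		int val = divider_get_val(rate, parent_rate, NULL, d->width, 0);

		if (val < 0)
			return val;
		my_regmap_write(d->reg, val << d->shift);
		return 0;
	}
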
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 51fd87fb7ba6..3f0e4200cb5d 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -128,7 +128,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
 	struct clk_init_data init;
 
 	if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
-		if (bit_idx > 16) {
+		if (bit_idx > 15) {
 			pr_err("gate bit exceeds LOWORD field\n");
 			return ERR_PTR(-EINVAL);
 		}
@@ -162,3 +162,19 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
 	return clk;
 }
 EXPORT_SYMBOL_GPL(clk_register_gate);
+
+void clk_unregister_gate(struct clk *clk)
+{
+	struct clk_gate *gate;
+	struct clk_hw *hw;
+
+	hw = __clk_get_hw(clk);
+	if (!hw)
+		return;
+
+	gate = to_clk_gate(hw);
+
+	clk_unregister(clk);
+	kfree(gate);
+}
+EXPORT_SYMBOL_GPL(clk_unregister_gate);
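
The bound on bit_idx drops from 16 to 15 because hiword-mask registers keep the data in bits [15:0] and a write-enable mask for those same bits in [31:16]; a gate bit at index 16 would collide with the mask field. Roughly, the write pattern the flag implies:

	/* Hi-word masked write: BIT(bit_idx + 16) tells the register which
	 * low-word bit this write may touch, so bit_idx must stay in 0..15.
	 */
	static u32 gate_hiword_val(unsigned int bit_idx, bool enable)
	{
		u32 val = BIT(bit_idx + 16);	/* write-enable for this bit */

		if (enable)
			val |= BIT(bit_idx);	/* new gate bit value */
		return val;
	}
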
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 6e1ecf94bf58..69a094c3783d 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -177,3 +177,19 @@ struct clk *clk_register_mux(struct device *dev, const char *name,
 			NULL, lock);
 }
 EXPORT_SYMBOL_GPL(clk_register_mux);
+
+void clk_unregister_mux(struct clk *clk)
+{
+	struct clk_mux *mux;
+	struct clk_hw *hw;
+
+	hw = __clk_get_hw(clk);
+	if (!hw)
+		return;
+
+	mux = to_clk_mux(hw);
+
+	clk_unregister(clk);
+	kfree(mux);
+}
+EXPORT_SYMBOL_GPL(clk_unregister_mux);
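
Together with clk_unregister_divider() and clk_unregister_gate() above, this gives the clk_register_*() basic types a symmetric teardown: fetch the hw back from the clk, unregister, then free the container allocated at registration time. Usage in a driver's error or remove path would look roughly like this sketch (parent_names, reg, shift, width and lock are assumed to exist in the caller):

	struct clk *mux;

	mux = clk_register_mux(dev, "my_mux", parent_names, num_parents,
			       0, reg, shift, width, 0, &lock);
	if (IS_ERR(mux))
		return PTR_ERR(mux);

	/* ... on failure or removal: unregisters and kfree()s the clk_mux */
	clk_unregister_mux(mux);
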
diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-qoriq.c
index 0a47d6f49cd6..cda90a971e39 100644
--- a/drivers/clk/clk-ppc-corenet.c
+++ b/drivers/clk/clk-qoriq.c
@@ -5,8 +5,11 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
- * clock driver for Freescale PowerPC corenet SoCs.
+ * clock driver for Freescale QorIQ SoCs.
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
@@ -19,6 +22,7 @@
 struct cmux_clk {
 	struct clk_hw hw;
 	void __iomem *reg;
+	unsigned int clk_per_pll;
 	u32 flags;
 };
 
@@ -27,14 +31,12 @@ struct cmux_clk {
 #define CLKSEL_ADJUST	BIT(0)
 #define to_cmux_clk(p)	container_of(p, struct cmux_clk, hw)
 
-static unsigned int clocks_per_pll;
-
 static int cmux_set_parent(struct clk_hw *hw, u8 idx)
 {
 	struct cmux_clk *clk = to_cmux_clk(hw);
 	u32 clksel;
 
-	clksel = ((idx / clocks_per_pll) << 2) + idx % clocks_per_pll;
+	clksel = ((idx / clk->clk_per_pll) << 2) + idx % clk->clk_per_pll;
 	if (clk->flags & CLKSEL_ADJUST)
 		clksel += 8;
 	clksel = (clksel & 0xf) << CLKSEL_SHIFT;
@@ -52,12 +54,12 @@ static u8 cmux_get_parent(struct clk_hw *hw)
 	clksel = (clksel >> CLKSEL_SHIFT) & 0xf;
 	if (clk->flags & CLKSEL_ADJUST)
 		clksel -= 8;
-	clksel = (clksel >> 2) * clocks_per_pll + clksel % 4;
+	clksel = (clksel >> 2) * clk->clk_per_pll + clksel % 4;
 
 	return clksel;
 }
 
-const struct clk_ops cmux_ops = {
+static const struct clk_ops cmux_ops = {
 	.get_parent = cmux_get_parent,
 	.set_parent = cmux_set_parent,
 };
@@ -72,6 +74,7 @@ static void __init core_mux_init(struct device_node *np)
 	u32 offset;
 	const char *clk_name;
 	const char **parent_names;
+	struct of_phandle_args clkspec;
 
 	rc = of_property_read_u32(np, "reg", &offset);
 	if (rc) {
@@ -85,32 +88,40 @@ static void __init core_mux_init(struct device_node *np)
 		pr_err("%s: get clock count error\n", np->name);
 		return;
 	}
-	parent_names = kzalloc((sizeof(char *) * count), GFP_KERNEL);
-	if (!parent_names) {
-		pr_err("%s: could not allocate parent_names\n", __func__);
+	parent_names = kcalloc(count, sizeof(char *), GFP_KERNEL);
+	if (!parent_names)
 		return;
-	}
 
 	for (i = 0; i < count; i++)
 		parent_names[i] = of_clk_get_parent_name(np, i);
 
-	cmux_clk = kzalloc(sizeof(struct cmux_clk), GFP_KERNEL);
-	if (!cmux_clk) {
-		pr_err("%s: could not allocate cmux_clk\n", __func__);
+	cmux_clk = kzalloc(sizeof(*cmux_clk), GFP_KERNEL);
+	if (!cmux_clk)
 		goto err_name;
-	}
+
 	cmux_clk->reg = of_iomap(np, 0);
 	if (!cmux_clk->reg) {
 		pr_err("%s: could not map register\n", __func__);
 		goto err_clk;
 	}
 
+	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
+					&clkspec);
+	if (rc) {
+		pr_err("%s: parse clock node error\n", __func__);
+		goto err_clk;
+	}
+
+	cmux_clk->clk_per_pll = of_property_count_strings(clkspec.np,
+			"clock-output-names");
+	of_node_put(clkspec.np);
+
 	node = of_find_compatible_node(NULL, NULL, "fsl,p4080-clockgen");
 	if (node && (offset >= 0x80))
 		cmux_clk->flags = CLKSEL_ADJUST;
 
 	rc = of_property_read_string_index(np, "clock-output-names",
 			0, &clk_name);
 	if (rc) {
 		pr_err("%s: read clock names error\n", np->name);
 		goto err_clk;
@@ -132,7 +143,7 @@ static void __init core_mux_init(struct device_node *np)
 	rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
 	if (rc) {
 		pr_err("Could not register clock provider for node:%s\n",
-			np->name);
+		       np->name);
 		goto err_clk;
 	}
 	goto err_name;
@@ -155,7 +166,7 @@ static void __init core_pll_init(struct device_node *np)
 
 	base = of_iomap(np, 0);
 	if (!base) {
-		pr_err("clk-ppc: iomap error\n");
+		pr_err("iomap error\n");
 		return;
 	}
 
@@ -181,24 +192,17 @@ static void __init core_pll_init(struct device_node *np)
 		goto err_map;
 	}
 
-	/* output clock number per PLL */
-	clocks_per_pll = count;
-
-	subclks = kzalloc(sizeof(struct clk *) * count, GFP_KERNEL);
-	if (!subclks) {
-		pr_err("%s: could not allocate subclks\n", __func__);
+	subclks = kcalloc(count, sizeof(struct clk *), GFP_KERNEL);
+	if (!subclks)
 		goto err_map;
-	}
 
-	onecell_data = kzalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
-	if (!onecell_data) {
-		pr_err("%s: could not allocate onecell_data\n", __func__);
+	onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
+	if (!onecell_data)
 		goto err_clks;
-	}
 
 	for (i = 0; i < count; i++) {
 		rc = of_property_read_string_index(np, "clock-output-names",
 				i, &clk_name);
 		if (rc) {
 			pr_err("%s: could not get clock names\n", np->name);
 			goto err_cell;
@@ -230,7 +234,7 @@ static void __init core_pll_init(struct device_node *np)
 	rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
 	if (rc) {
 		pr_err("Could not register clk provider for node:%s\n",
-			np->name);
+		       np->name);
 		goto err_cell;
 	}
 
@@ -252,7 +256,7 @@ static void __init sysclk_init(struct device_node *node)
 	u32 rate;
 
 	if (!np) {
-		pr_err("ppc-clk: could not get parent node\n");
+		pr_err("could not get parent node\n");
 		return;
 	}
 
@@ -268,39 +272,91 @@ static void __init sysclk_init(struct device_node *node)
 	of_clk_add_provider(np, of_clk_src_simple_get, clk);
 }
 
-static const struct of_device_id clk_match[] __initconst = {
-	{ .compatible = "fsl,qoriq-sysclk-1.0", .data = sysclk_init, },
-	{ .compatible = "fsl,qoriq-sysclk-2.0", .data = sysclk_init, },
-	{ .compatible = "fsl,qoriq-core-pll-1.0", .data = core_pll_init, },
-	{ .compatible = "fsl,qoriq-core-pll-2.0", .data = core_pll_init, },
-	{ .compatible = "fsl,qoriq-core-mux-1.0", .data = core_mux_init, },
-	{ .compatible = "fsl,qoriq-core-mux-2.0", .data = core_mux_init, },
-	{}
-};
-
-static int __init ppc_corenet_clk_probe(struct platform_device *pdev)
+static void __init pltfrm_pll_init(struct device_node *np)
 {
-	of_clk_init(clk_match);
+	void __iomem *base;
+	uint32_t mult;
+	const char *parent_name, *clk_name;
+	int i, _errno;
+	struct clk_onecell_data *cod;
 
-	return 0;
-}
+	base = of_iomap(np, 0);
+	if (!base) {
+		pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name);
+		return;
+	}
 
-static const struct of_device_id ppc_clk_ids[] __initconst = {
-	{ .compatible = "fsl,qoriq-clockgen-1.0", },
-	{ .compatible = "fsl,qoriq-clockgen-2.0", },
-	{}
-};
+	/* Get the multiple of PLL */
+	mult = ioread32be(base);
 
-static struct platform_driver ppc_corenet_clk_driver = {
-	.driver = {
-		.name = "ppc_corenet_clock",
-		.of_match_table = ppc_clk_ids,
-	},
-	.probe = ppc_corenet_clk_probe,
-};
+	iounmap(base);
 
-static int __init ppc_corenet_clk_init(void)
-{
-	return platform_driver_register(&ppc_corenet_clk_driver);
+	/* Check if this PLL is disabled */
+	if (mult & PLL_KILL) {
+		pr_debug("%s(): %s: Disabled\n", __func__, np->name);
+		return;
+	}
+	mult = (mult & GENMASK(6, 1)) >> 1;
+
+	parent_name = of_clk_get_parent_name(np, 0);
+	if (!parent_name) {
+		pr_err("%s(): %s: of_clk_get_parent_name() failed\n",
+		       __func__, np->name);
+		return;
+	}
+
+	i = of_property_count_strings(np, "clock-output-names");
+	if (i < 0) {
+		pr_err("%s(): %s: of_property_count_strings(clock-output-names) = %d\n",
+		       __func__, np->name, i);
+		return;
+	}
+
+	cod = kmalloc(sizeof(*cod) + i * sizeof(struct clk *), GFP_KERNEL);
+	if (!cod)
+		return;
+	cod->clks = (struct clk **)(cod + 1);
+	cod->clk_num = i;
+
+	for (i = 0; i < cod->clk_num; i++) {
+		_errno = of_property_read_string_index(np, "clock-output-names",
+						       i, &clk_name);
+		if (_errno < 0) {
+			pr_err("%s(): %s: of_property_read_string_index(clock-output-names) = %d\n",
+			       __func__, np->name, _errno);
+			goto return_clk_unregister;
+		}
+
+		cod->clks[i] = clk_register_fixed_factor(NULL, clk_name,
+						parent_name, 0, mult, 1 + i);
+		if (IS_ERR(cod->clks[i])) {
+			pr_err("%s(): %s: clk_register_fixed_factor(%s) = %ld\n",
+			       __func__, np->name,
+			       clk_name, PTR_ERR(cod->clks[i]));
+			goto return_clk_unregister;
+		}
+	}
+
+	_errno = of_clk_add_provider(np, of_clk_src_onecell_get, cod);
+	if (_errno < 0) {
+		pr_err("%s(): %s: of_clk_add_provider() = %d\n",
+		       __func__, np->name, _errno);
+		goto return_clk_unregister;
+	}
+
+	return;
+
+return_clk_unregister:
+	while (--i >= 0)
+		clk_unregister(cod->clks[i]);
+	kfree(cod);
 }
-subsys_initcall(ppc_corenet_clk_init);
+
+CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
+CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
+CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
+CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
+CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
+CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
+CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);
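
Converting from a subsys_initcall'd platform driver to CLK_OF_DECLARE() changes when these clocks come up: the init callbacks now run from of_clk_init() during early boot, before platform device matching, which is what early consumers such as timers expect. A minimal sketch of the pattern, mirroring sysclk_init above ("vendor,my-sysclk" is a made-up compatible, not from this driver):

	static void __init my_sysclk_init(struct device_node *np)
	{
		struct clk *clk;
		u32 rate;

		if (of_property_read_u32(np, "clock-frequency", &rate))
			return;

		clk = clk_register_fixed_rate(NULL, np->name, NULL,
					      CLK_IS_ROOT, rate);
		if (!IS_ERR(clk))
			of_clk_add_provider(np, of_clk_src_simple_get, clk);
	}
	CLK_OF_DECLARE(my_sysclk, "vendor,my-sysclk", my_sysclk_init);
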
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 642cf37124d3..eb0152961d3c 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -9,7 +9,7 @@
9 * Standard functionality for the common clock API. See Documentation/clk.txt 9 * Standard functionality for the common clock API. See Documentation/clk.txt
10 */ 10 */
11 11
12#include <linux/clk-private.h> 12#include <linux/clk-provider.h>
13#include <linux/clk/clk-conf.h> 13#include <linux/clk/clk-conf.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/mutex.h> 15#include <linux/mutex.h>
@@ -37,6 +37,55 @@ static HLIST_HEAD(clk_root_list);
37static HLIST_HEAD(clk_orphan_list); 37static HLIST_HEAD(clk_orphan_list);
38static LIST_HEAD(clk_notifier_list); 38static LIST_HEAD(clk_notifier_list);
39 39
40static long clk_core_get_accuracy(struct clk_core *clk);
41static unsigned long clk_core_get_rate(struct clk_core *clk);
42static int clk_core_get_phase(struct clk_core *clk);
43static bool clk_core_is_prepared(struct clk_core *clk);
44static bool clk_core_is_enabled(struct clk_core *clk);
45static struct clk_core *clk_core_lookup(const char *name);
46
47/*** private data structures ***/
48
49struct clk_core {
50 const char *name;
51 const struct clk_ops *ops;
52 struct clk_hw *hw;
53 struct module *owner;
54 struct clk_core *parent;
55 const char **parent_names;
56 struct clk_core **parents;
57 u8 num_parents;
58 u8 new_parent_index;
59 unsigned long rate;
60 unsigned long req_rate;
61 unsigned long new_rate;
62 struct clk_core *new_parent;
63 struct clk_core *new_child;
64 unsigned long flags;
65 unsigned int enable_count;
66 unsigned int prepare_count;
67 unsigned long accuracy;
68 int phase;
69 struct hlist_head children;
70 struct hlist_node child_node;
71 struct hlist_node debug_node;
72 struct hlist_head clks;
73 unsigned int notifier_count;
74#ifdef CONFIG_DEBUG_FS
75 struct dentry *dentry;
76#endif
77 struct kref ref;
78};
79
80struct clk {
81 struct clk_core *core;
82 const char *dev_id;
83 const char *con_id;
84 unsigned long min_rate;
85 unsigned long max_rate;
86 struct hlist_node child_node;
87};
88
40/*** locking ***/ 89/*** locking ***/
41static void clk_prepare_lock(void) 90static void clk_prepare_lock(void)
42{ 91{
@@ -114,7 +163,8 @@ static struct hlist_head *orphan_list[] = {
114 NULL, 163 NULL,
115}; 164};
116 165
117static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level) 166static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
167 int level)
118{ 168{
119 if (!c) 169 if (!c)
120 return; 170 return;
@@ -122,14 +172,14 @@ static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
122 seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n", 172 seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
123 level * 3 + 1, "", 173 level * 3 + 1, "",
124 30 - level * 3, c->name, 174 30 - level * 3, c->name,
125 c->enable_count, c->prepare_count, clk_get_rate(c), 175 c->enable_count, c->prepare_count, clk_core_get_rate(c),
126 clk_get_accuracy(c), clk_get_phase(c)); 176 clk_core_get_accuracy(c), clk_core_get_phase(c));
127} 177}
128 178
129static void clk_summary_show_subtree(struct seq_file *s, struct clk *c, 179static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
130 int level) 180 int level)
131{ 181{
132 struct clk *child; 182 struct clk_core *child;
133 183
134 if (!c) 184 if (!c)
135 return; 185 return;
@@ -142,7 +192,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
142 192
143static int clk_summary_show(struct seq_file *s, void *data) 193static int clk_summary_show(struct seq_file *s, void *data)
144{ 194{
145 struct clk *c; 195 struct clk_core *c;
146 struct hlist_head **lists = (struct hlist_head **)s->private; 196 struct hlist_head **lists = (struct hlist_head **)s->private;
147 197
148 seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n"); 198 seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n");
@@ -172,7 +222,7 @@ static const struct file_operations clk_summary_fops = {
172 .release = single_release, 222 .release = single_release,
173}; 223};
174 224
175static void clk_dump_one(struct seq_file *s, struct clk *c, int level) 225static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
176{ 226{
177 if (!c) 227 if (!c)
178 return; 228 return;
@@ -180,14 +230,14 @@ static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
180 seq_printf(s, "\"%s\": { ", c->name); 230 seq_printf(s, "\"%s\": { ", c->name);
181 seq_printf(s, "\"enable_count\": %d,", c->enable_count); 231 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
182 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); 232 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
183 seq_printf(s, "\"rate\": %lu", clk_get_rate(c)); 233 seq_printf(s, "\"rate\": %lu", clk_core_get_rate(c));
184 seq_printf(s, "\"accuracy\": %lu", clk_get_accuracy(c)); 234 seq_printf(s, "\"accuracy\": %lu", clk_core_get_accuracy(c));
185 seq_printf(s, "\"phase\": %d", clk_get_phase(c)); 235 seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
186} 236}
187 237
188static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level) 238static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
189{ 239{
190 struct clk *child; 240 struct clk_core *child;
191 241
192 if (!c) 242 if (!c)
193 return; 243 return;
@@ -204,7 +254,7 @@ static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
204 254
205static int clk_dump(struct seq_file *s, void *data) 255static int clk_dump(struct seq_file *s, void *data)
206{ 256{
207 struct clk *c; 257 struct clk_core *c;
208 bool first_node = true; 258 bool first_node = true;
209 struct hlist_head **lists = (struct hlist_head **)s->private; 259 struct hlist_head **lists = (struct hlist_head **)s->private;
210 260
@@ -240,7 +290,7 @@ static const struct file_operations clk_dump_fops = {
240 .release = single_release, 290 .release = single_release,
241}; 291};
242 292
243static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry) 293static int clk_debug_create_one(struct clk_core *clk, struct dentry *pdentry)
244{ 294{
245 struct dentry *d; 295 struct dentry *d;
246 int ret = -ENOMEM; 296 int ret = -ENOMEM;
@@ -315,7 +365,7 @@ out:
315 * initialized. Otherwise it bails out early since the debugfs clk tree 365 * initialized. Otherwise it bails out early since the debugfs clk tree
316 * will be created lazily by clk_debug_init as part of a late_initcall. 366 * will be created lazily by clk_debug_init as part of a late_initcall.
317 */ 367 */
318static int clk_debug_register(struct clk *clk) 368static int clk_debug_register(struct clk_core *clk)
319{ 369{
320 int ret = 0; 370 int ret = 0;
321 371
@@ -340,16 +390,12 @@ unlock:
340 * debugfs clk tree if clk->dentry points to debugfs created by 390 * debugfs clk tree if clk->dentry points to debugfs created by
341 * clk_debug_register in __clk_init. 391 * clk_debug_register in __clk_init.
342 */ 392 */
343static void clk_debug_unregister(struct clk *clk) 393static void clk_debug_unregister(struct clk_core *clk)
344{ 394{
345 mutex_lock(&clk_debug_lock); 395 mutex_lock(&clk_debug_lock);
346 if (!clk->dentry)
347 goto out;
348
349 hlist_del_init(&clk->debug_node); 396 hlist_del_init(&clk->debug_node);
350 debugfs_remove_recursive(clk->dentry); 397 debugfs_remove_recursive(clk->dentry);
351 clk->dentry = NULL; 398 clk->dentry = NULL;
352out:
353 mutex_unlock(&clk_debug_lock); 399 mutex_unlock(&clk_debug_lock);
354} 400}
355 401
@@ -358,8 +404,9 @@ struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
358{ 404{
359 struct dentry *d = NULL; 405 struct dentry *d = NULL;
360 406
361 if (hw->clk->dentry) 407 if (hw->core->dentry)
362 d = debugfs_create_file(name, mode, hw->clk->dentry, data, fops); 408 d = debugfs_create_file(name, mode, hw->core->dentry, data,
409 fops);
363 410
364 return d; 411 return d;
365} 412}
@@ -379,7 +426,7 @@ EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
379 */ 426 */
380static int __init clk_debug_init(void) 427static int __init clk_debug_init(void)
381{ 428{
382 struct clk *clk; 429 struct clk_core *clk;
383 struct dentry *d; 430 struct dentry *d;
384 431
385 rootdir = debugfs_create_dir("clk", NULL); 432 rootdir = debugfs_create_dir("clk", NULL);
@@ -418,22 +465,20 @@ static int __init clk_debug_init(void)
418} 465}
419late_initcall(clk_debug_init); 466late_initcall(clk_debug_init);
420#else 467#else
421static inline int clk_debug_register(struct clk *clk) { return 0; } 468static inline int clk_debug_register(struct clk_core *clk) { return 0; }
422static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent) 469static inline void clk_debug_reparent(struct clk_core *clk,
470 struct clk_core *new_parent)
423{ 471{
424} 472}
425static inline void clk_debug_unregister(struct clk *clk) 473static inline void clk_debug_unregister(struct clk_core *clk)
426{ 474{
427} 475}
428#endif 476#endif
429 477
430/* caller must hold prepare_lock */ 478/* caller must hold prepare_lock */
431static void clk_unprepare_unused_subtree(struct clk *clk) 479static void clk_unprepare_unused_subtree(struct clk_core *clk)
432{ 480{
433 struct clk *child; 481 struct clk_core *child;
434
435 if (!clk)
436 return;
437 482
438 hlist_for_each_entry(child, &clk->children, child_node) 483 hlist_for_each_entry(child, &clk->children, child_node)
439 clk_unprepare_unused_subtree(child); 484 clk_unprepare_unused_subtree(child);
@@ -444,7 +489,7 @@ static void clk_unprepare_unused_subtree(struct clk *clk)
444 if (clk->flags & CLK_IGNORE_UNUSED) 489 if (clk->flags & CLK_IGNORE_UNUSED)
445 return; 490 return;
446 491
447 if (__clk_is_prepared(clk)) { 492 if (clk_core_is_prepared(clk)) {
448 if (clk->ops->unprepare_unused) 493 if (clk->ops->unprepare_unused)
449 clk->ops->unprepare_unused(clk->hw); 494 clk->ops->unprepare_unused(clk->hw);
450 else if (clk->ops->unprepare) 495 else if (clk->ops->unprepare)
@@ -453,14 +498,11 @@ static void clk_unprepare_unused_subtree(struct clk *clk)
 }
 
 /* caller must hold prepare_lock */
-static void clk_disable_unused_subtree(struct clk *clk)
+static void clk_disable_unused_subtree(struct clk_core *clk)
 {
-        struct clk *child;
+        struct clk_core *child;
         unsigned long flags;
 
-        if (!clk)
-                goto out;
-
         hlist_for_each_entry(child, &clk->children, child_node)
                 clk_disable_unused_subtree(child);
 
@@ -477,7 +519,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
          * sequence. call .disable_unused if available, otherwise fall
          * back to .disable
          */
-        if (__clk_is_enabled(clk)) {
+        if (clk_core_is_enabled(clk)) {
                 if (clk->ops->disable_unused)
                         clk->ops->disable_unused(clk->hw);
                 else if (clk->ops->disable)
@@ -486,9 +528,6 @@ static void clk_disable_unused_subtree(struct clk *clk)
 
 unlock_out:
         clk_enable_unlock(flags);
-
-out:
-        return;
 }
 
 static bool clk_ignore_unused;
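
The sweep of unused clocks can be suppressed globally by booting with clk_ignore_unused (the __setup() handler below), or per clock via CLK_IGNORE_UNUSED in the provider's init data. A minimal provider-side sketch (the clock name and choice of ops are illustrative only, not part of this patch):

        /* Hypothetical always-needed debug UART gate: opt out of the
         * late_initcall sweep even when no consumer has claimed it yet. */
        static const struct clk_init_data dbg_uart_init = {
                .name         = "dbg_uart",
                .ops          = &clk_gate_ops,
                .parent_names = (const char *[]){ "periph" },
                .num_parents  = 1,
                .flags        = CLK_IGNORE_UNUSED,
        };
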
@@ -501,7 +540,7 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
 
 static int clk_disable_unused(void)
 {
-        struct clk *clk;
+        struct clk_core *clk;
 
         if (clk_ignore_unused) {
                 pr_warn("clk: Not disabling unused clocks\n");
@@ -532,48 +571,65 @@ late_initcall_sync(clk_disable_unused);
 
 const char *__clk_get_name(struct clk *clk)
 {
-        return !clk ? NULL : clk->name;
+        return !clk ? NULL : clk->core->name;
 }
 EXPORT_SYMBOL_GPL(__clk_get_name);
 
 struct clk_hw *__clk_get_hw(struct clk *clk)
 {
-        return !clk ? NULL : clk->hw;
+        return !clk ? NULL : clk->core->hw;
 }
 EXPORT_SYMBOL_GPL(__clk_get_hw);
 
 u8 __clk_get_num_parents(struct clk *clk)
 {
-        return !clk ? 0 : clk->num_parents;
+        return !clk ? 0 : clk->core->num_parents;
 }
 EXPORT_SYMBOL_GPL(__clk_get_num_parents);
 
 struct clk *__clk_get_parent(struct clk *clk)
 {
-        return !clk ? NULL : clk->parent;
+        if (!clk)
+                return NULL;
+
+        /* TODO: Create a per-user clk and change callers to call clk_put */
+        return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
 }
 EXPORT_SYMBOL_GPL(__clk_get_parent);
 
-struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
+static struct clk_core *clk_core_get_parent_by_index(struct clk_core *clk,
+                                                     u8 index)
 {
         if (!clk || index >= clk->num_parents)
                 return NULL;
         else if (!clk->parents)
-                return __clk_lookup(clk->parent_names[index]);
+                return clk_core_lookup(clk->parent_names[index]);
         else if (!clk->parents[index])
                 return clk->parents[index] =
-                        __clk_lookup(clk->parent_names[index]);
+                        clk_core_lookup(clk->parent_names[index]);
         else
                 return clk->parents[index];
 }
+
+struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
+{
+        struct clk_core *parent;
+
+        if (!clk)
+                return NULL;
+
+        parent = clk_core_get_parent_by_index(clk->core, index);
+
+        return !parent ? NULL : parent->hw->clk;
+}
 EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
 
 unsigned int __clk_get_enable_count(struct clk *clk)
 {
-        return !clk ? 0 : clk->enable_count;
+        return !clk ? 0 : clk->core->enable_count;
 }
 
-unsigned long __clk_get_rate(struct clk *clk)
+static unsigned long clk_core_get_rate_nolock(struct clk_core *clk)
 {
         unsigned long ret;
 
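
Every accessor change above follows from the split this patch introduces: the hardware state moves into struct clk_core, while each clk_get() caller holds a thin per-user struct clk pointing at it. Abridged, using only fields visible in this diff:

        struct clk_core {                  /* one instance per hardware clock */
                const char              *name;
                const struct clk_ops    *ops;
                struct clk_hw           *hw;
                struct clk_core         *parent;
                unsigned long           rate;
                unsigned long           req_rate;   /* last requested rate */
                struct hlist_head       clks;       /* all per-user handles */
                /* ... */
        };

        struct clk {                       /* one instance per consumer */
                struct clk_core         *core;
                const char              *dev_id;
                const char              *con_id;
                unsigned long           min_rate;   /* this user's floor */
                unsigned long           max_rate;   /* this user's ceiling */
                struct hlist_node       child_node; /* entry in core->clks */
        };
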
@@ -593,9 +649,17 @@ unsigned long __clk_get_rate(struct clk *clk)
 out:
         return ret;
 }
+
+unsigned long __clk_get_rate(struct clk *clk)
+{
+        if (!clk)
+                return 0;
+
+        return clk_core_get_rate_nolock(clk->core);
+}
 EXPORT_SYMBOL_GPL(__clk_get_rate);
 
-static unsigned long __clk_get_accuracy(struct clk *clk)
+static unsigned long __clk_get_accuracy(struct clk_core *clk)
 {
         if (!clk)
                 return 0;
@@ -605,11 +669,11 @@ static unsigned long __clk_get_accuracy(struct clk *clk)
 
 unsigned long __clk_get_flags(struct clk *clk)
 {
-        return !clk ? 0 : clk->flags;
+        return !clk ? 0 : clk->core->flags;
 }
 EXPORT_SYMBOL_GPL(__clk_get_flags);
 
-bool __clk_is_prepared(struct clk *clk)
+static bool clk_core_is_prepared(struct clk_core *clk)
 {
         int ret;
 
@@ -630,7 +694,15 @@ out:
         return !!ret;
 }
 
-bool __clk_is_enabled(struct clk *clk)
+bool __clk_is_prepared(struct clk *clk)
+{
+        if (!clk)
+                return false;
+
+        return clk_core_is_prepared(clk->core);
+}
+
+static bool clk_core_is_enabled(struct clk_core *clk)
 {
         int ret;
 
@@ -650,12 +722,21 @@ bool __clk_is_enabled(struct clk *clk)
 out:
         return !!ret;
 }
+
+bool __clk_is_enabled(struct clk *clk)
+{
+        if (!clk)
+                return false;
+
+        return clk_core_is_enabled(clk->core);
+}
 EXPORT_SYMBOL_GPL(__clk_is_enabled);
 
-static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
+static struct clk_core *__clk_lookup_subtree(const char *name,
+                                             struct clk_core *clk)
 {
-        struct clk *child;
-        struct clk *ret;
+        struct clk_core *child;
+        struct clk_core *ret;
 
         if (!strcmp(clk->name, name))
                 return clk;
@@ -694,42 +775,53 @@ struct clk *__clk_lookup(const char *name)
         return NULL;
 }
 
-/*
- * Helper for finding best parent to provide a given frequency. This can be used
- * directly as a determine_rate callback (e.g. for a mux), or from a more
- * complex clock that may combine a mux with other operations.
- */
-long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
-                              unsigned long *best_parent_rate,
-                              struct clk_hw **best_parent_p)
+static bool mux_is_better_rate(unsigned long rate, unsigned long now,
+                               unsigned long best, unsigned long flags)
 {
-        struct clk *clk = hw->clk, *parent, *best_parent = NULL;
+        if (flags & CLK_MUX_ROUND_CLOSEST)
+                return abs(now - rate) < abs(best - rate);
+
+        return now <= rate && now > best;
+}
+
+static long
+clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate,
+                             unsigned long min_rate,
+                             unsigned long max_rate,
+                             unsigned long *best_parent_rate,
+                             struct clk_hw **best_parent_p,
+                             unsigned long flags)
+{
+        struct clk_core *core = hw->core, *parent, *best_parent = NULL;
         int i, num_parents;
         unsigned long parent_rate, best = 0;
 
         /* if NO_REPARENT flag set, pass through to current parent */
-        if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
-                parent = clk->parent;
-                if (clk->flags & CLK_SET_RATE_PARENT)
-                        best = __clk_round_rate(parent, rate);
+        if (core->flags & CLK_SET_RATE_NO_REPARENT) {
+                parent = core->parent;
+                if (core->flags & CLK_SET_RATE_PARENT)
+                        best = __clk_determine_rate(parent ? parent->hw : NULL,
+                                                    rate, min_rate, max_rate);
                 else if (parent)
-                        best = __clk_get_rate(parent);
+                        best = clk_core_get_rate_nolock(parent);
                 else
-                        best = __clk_get_rate(clk);
+                        best = clk_core_get_rate_nolock(core);
                 goto out;
         }
 
         /* find the parent that can provide the fastest rate <= rate */
-        num_parents = clk->num_parents;
+        num_parents = core->num_parents;
         for (i = 0; i < num_parents; i++) {
-                parent = clk_get_parent_by_index(clk, i);
+                parent = clk_core_get_parent_by_index(core, i);
                 if (!parent)
                         continue;
-                if (clk->flags & CLK_SET_RATE_PARENT)
-                        parent_rate = __clk_round_rate(parent, rate);
+                if (core->flags & CLK_SET_RATE_PARENT)
+                        parent_rate = __clk_determine_rate(parent->hw, rate,
+                                                           min_rate,
+                                                           max_rate);
                 else
-                        parent_rate = __clk_get_rate(parent);
-                if (parent_rate <= rate && parent_rate > best) {
+                        parent_rate = clk_core_get_rate_nolock(parent);
+                if (mux_is_better_rate(rate, parent_rate, best, flags)) {
                         best_parent = parent;
                         best = parent_rate;
                 }
@@ -742,11 +834,63 @@ out:
 
         return best;
 }
+
+struct clk *__clk_lookup(const char *name)
+{
+        struct clk_core *core = clk_core_lookup(name);
+
+        return !core ? NULL : core->hw->clk;
+}
+
+static void clk_core_get_boundaries(struct clk_core *clk,
+                                    unsigned long *min_rate,
+                                    unsigned long *max_rate)
+{
+        struct clk *clk_user;
+
+        *min_rate = 0;
+        *max_rate = ULONG_MAX;
+
+        hlist_for_each_entry(clk_user, &clk->clks, child_node)
+                *min_rate = max(*min_rate, clk_user->min_rate);
+
+        hlist_for_each_entry(clk_user, &clk->clks, child_node)
+                *max_rate = min(*max_rate, clk_user->max_rate);
+}
+
+/*
+ * Helper for finding best parent to provide a given frequency. This can be used
+ * directly as a determine_rate callback (e.g. for a mux), or from a more
+ * complex clock that may combine a mux with other operations.
+ */
+long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
+                              unsigned long min_rate,
+                              unsigned long max_rate,
+                              unsigned long *best_parent_rate,
+                              struct clk_hw **best_parent_p)
+{
+        return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
+                                            best_parent_rate,
+                                            best_parent_p, 0);
+}
 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
 
+long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
+                                      unsigned long min_rate,
+                                      unsigned long max_rate,
+                                      unsigned long *best_parent_rate,
+                                      struct clk_hw **best_parent_p)
+{
+        return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
+                                            best_parent_rate,
+                                            best_parent_p,
+                                            CLK_MUX_ROUND_CLOSEST);
+}
+EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
+
 /*** clk api ***/
 
-void __clk_unprepare(struct clk *clk)
+static void clk_core_unprepare(struct clk_core *clk)
 {
         if (!clk)
                 return;
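
The two exported mux helpers above differ only in the policy mux_is_better_rate() applies: the default accepts the fastest candidate not above the request, while CLK_MUX_ROUND_CLOSEST minimizes the absolute error in either direction. With parents at 150 and 200 MHz and a request of 180 MHz, the default picks 150 MHz, the closest variant 200 MHz. A provider opts in through its clk_ops (sketch only; a real mux also implements .get_parent/.set_parent):

        static const struct clk_ops my_mux_ops = {
                /* hypothetical mux that prefers the nearest rate overall */
                .determine_rate = __clk_mux_determine_rate_closest,
        };
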
@@ -762,7 +906,7 @@ void __clk_unprepare(struct clk *clk)
         if (clk->ops->unprepare)
                 clk->ops->unprepare(clk->hw);
 
-        __clk_unprepare(clk->parent);
+        clk_core_unprepare(clk->parent);
 }
 
 /**
@@ -782,12 +926,12 @@ void clk_unprepare(struct clk *clk)
                 return;
 
         clk_prepare_lock();
-        __clk_unprepare(clk);
+        clk_core_unprepare(clk->core);
         clk_prepare_unlock();
 }
 EXPORT_SYMBOL_GPL(clk_unprepare);
 
-int __clk_prepare(struct clk *clk)
+static int clk_core_prepare(struct clk_core *clk)
 {
         int ret = 0;
 
@@ -795,14 +939,14 @@ int __clk_prepare(struct clk *clk)
                 return 0;
 
         if (clk->prepare_count == 0) {
-                ret = __clk_prepare(clk->parent);
+                ret = clk_core_prepare(clk->parent);
                 if (ret)
                         return ret;
 
                 if (clk->ops->prepare) {
                         ret = clk->ops->prepare(clk->hw);
                         if (ret) {
-                                __clk_unprepare(clk->parent);
+                                clk_core_unprepare(clk->parent);
                                 return ret;
                         }
                 }
@@ -829,15 +973,18 @@ int clk_prepare(struct clk *clk)
 {
         int ret;
 
+        if (!clk)
+                return 0;
+
         clk_prepare_lock();
-        ret = __clk_prepare(clk);
+        ret = clk_core_prepare(clk->core);
         clk_prepare_unlock();
 
         return ret;
 }
 EXPORT_SYMBOL_GPL(clk_prepare);
 
-static void __clk_disable(struct clk *clk)
+static void clk_core_disable(struct clk_core *clk)
 {
         if (!clk)
                 return;
@@ -851,7 +998,15 @@ static void __clk_disable(struct clk *clk)
         if (clk->ops->disable)
                 clk->ops->disable(clk->hw);
 
-        __clk_disable(clk->parent);
+        clk_core_disable(clk->parent);
+}
+
+static void __clk_disable(struct clk *clk)
+{
+        if (!clk)
+                return;
+
+        clk_core_disable(clk->core);
 }
 
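
The clk_core_enable()/clk_core_disable() pair keeps the old recursion: counts propagate to the parent only on 0<->1 transitions, so consumers still see strictly paired semantics. The canonical consumer sequence is unchanged by this patch:

        /* clk may be NULL, in which case every call is a harmless nop */
        ret = clk_prepare(clk);         /* may sleep (PLL lock, etc.) */
        if (ret)
                return ret;

        ret = clk_enable(clk);          /* atomic; usable from IRQ context */
        if (ret) {
                clk_unprepare(clk);
                return ret;
        }

        /* ... use the hardware ... */

        clk_disable(clk);
        clk_unprepare(clk);
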
857/** 1012/**
@@ -879,7 +1034,7 @@ void clk_disable(struct clk *clk)
879} 1034}
880EXPORT_SYMBOL_GPL(clk_disable); 1035EXPORT_SYMBOL_GPL(clk_disable);
881 1036
882static int __clk_enable(struct clk *clk) 1037static int clk_core_enable(struct clk_core *clk)
883{ 1038{
884 int ret = 0; 1039 int ret = 0;
885 1040
@@ -890,7 +1045,7 @@ static int __clk_enable(struct clk *clk)
                 return -ESHUTDOWN;
 
         if (clk->enable_count == 0) {
-                ret = __clk_enable(clk->parent);
+                ret = clk_core_enable(clk->parent);
 
                 if (ret)
                         return ret;
@@ -898,7 +1053,7 @@ static int __clk_enable(struct clk *clk)
                 if (clk->ops->enable) {
                         ret = clk->ops->enable(clk->hw);
                         if (ret) {
-                                __clk_disable(clk->parent);
+                                clk_core_disable(clk->parent);
                                 return ret;
                         }
                 }
@@ -908,6 +1063,14 @@ static int __clk_enable(struct clk *clk)
         return 0;
 }
 
+static int __clk_enable(struct clk *clk)
+{
+        if (!clk)
+                return 0;
+
+        return clk_core_enable(clk->core);
+}
+
 /**
  * clk_enable - ungate a clock
  * @clk: the clk being ungated
@@ -934,17 +1097,13 @@ int clk_enable(struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(clk_enable);
 
-/**
- * __clk_round_rate - round the given rate for a clk
- * @clk: round the rate of this clock
- * @rate: the rate which is to be rounded
- *
- * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate
- */
-unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
+static unsigned long clk_core_round_rate_nolock(struct clk_core *clk,
+                                                unsigned long rate,
+                                                unsigned long min_rate,
+                                                unsigned long max_rate)
 {
         unsigned long parent_rate = 0;
-        struct clk *parent;
+        struct clk_core *parent;
         struct clk_hw *parent_hw;
 
         if (!clk)
@@ -956,15 +1115,59 @@ unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
 
         if (clk->ops->determine_rate) {
                 parent_hw = parent ? parent->hw : NULL;
-                return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
-                                                &parent_hw);
+                return clk->ops->determine_rate(clk->hw, rate,
+                                                min_rate, max_rate,
+                                                &parent_rate, &parent_hw);
         } else if (clk->ops->round_rate)
                 return clk->ops->round_rate(clk->hw, rate, &parent_rate);
         else if (clk->flags & CLK_SET_RATE_PARENT)
-                return __clk_round_rate(clk->parent, rate);
+                return clk_core_round_rate_nolock(clk->parent, rate, min_rate,
+                                                  max_rate);
         else
                 return clk->rate;
 }
+
+/**
+ * __clk_determine_rate - get the closest rate actually supported by a clock
+ * @hw: determine the rate of this clock
+ * @rate: target rate
+ * @min_rate: returned rate must be greater than this rate
+ * @max_rate: returned rate must be less than this rate
+ *
+ * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate and
+ * .determine_rate.
+ */
+unsigned long __clk_determine_rate(struct clk_hw *hw,
+                                   unsigned long rate,
+                                   unsigned long min_rate,
+                                   unsigned long max_rate)
+{
+        if (!hw)
+                return 0;
+
+        return clk_core_round_rate_nolock(hw->core, rate, min_rate, max_rate);
+}
+EXPORT_SYMBOL_GPL(__clk_determine_rate);
+
+/**
+ * __clk_round_rate - round the given rate for a clk
+ * @clk: round the rate of this clock
+ * @rate: the rate which is to be rounded
+ *
+ * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate
+ */
+unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
+{
+        unsigned long min_rate;
+        unsigned long max_rate;
+
+        if (!clk)
+                return 0;
+
+        clk_core_get_boundaries(clk->core, &min_rate, &max_rate);
+
+        return clk_core_round_rate_nolock(clk->core, rate, min_rate, max_rate);
+}
 EXPORT_SYMBOL_GPL(__clk_round_rate);
 
 /**
@@ -980,6 +1183,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
 {
         unsigned long ret;
 
+        if (!clk)
+                return 0;
+
         clk_prepare_lock();
         ret = __clk_round_rate(clk, rate);
         clk_prepare_unlock();
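
With the plumbing above, a provider's .determine_rate callback now receives the floor and ceiling aggregated from every per-user handle and is expected to stay inside them. A sketch against the six-parameter signature used by this patch (hypothetical divide-by-two clock; later kernels changed the signature again, so this matches only the code as shown):

        static long my_div2_determine_rate(struct clk_hw *hw,
                                           unsigned long rate,
                                           unsigned long min_rate,
                                           unsigned long max_rate,
                                           unsigned long *best_parent_rate,
                                           struct clk_hw **best_parent_p)
        {
                /* fixed /2 from the parent, clamped to the aggregate window */
                return clamp(*best_parent_rate / 2, min_rate, max_rate);
        }
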
@@ -1002,22 +1208,21 @@ EXPORT_SYMBOL_GPL(clk_round_rate);
  * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
  * a driver returns that.
  */
-static int __clk_notify(struct clk *clk, unsigned long msg,
+static int __clk_notify(struct clk_core *clk, unsigned long msg,
                         unsigned long old_rate, unsigned long new_rate)
 {
         struct clk_notifier *cn;
         struct clk_notifier_data cnd;
         int ret = NOTIFY_DONE;
 
-        cnd.clk = clk;
         cnd.old_rate = old_rate;
         cnd.new_rate = new_rate;
 
         list_for_each_entry(cn, &clk_notifier_list, node) {
-                if (cn->clk == clk) {
+                if (cn->clk->core == clk) {
+                        cnd.clk = cn->clk;
                         ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
                                                        &cnd);
-                        break;
                 }
         }
 
@@ -1035,10 +1240,10 @@ static int __clk_notify(struct clk *clk, unsigned long msg,
  *
  * Caller must hold prepare_lock.
  */
-static void __clk_recalc_accuracies(struct clk *clk)
+static void __clk_recalc_accuracies(struct clk_core *clk)
 {
         unsigned long parent_accuracy = 0;
-        struct clk *child;
+        struct clk_core *child;
 
         if (clk->parent)
                 parent_accuracy = clk->parent->accuracy;
@@ -1053,6 +1258,20 @@ static void __clk_recalc_accuracies(struct clk *clk)
                 __clk_recalc_accuracies(child);
 }
 
+static long clk_core_get_accuracy(struct clk_core *clk)
+{
+        unsigned long accuracy;
+
+        clk_prepare_lock();
+        if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
+                __clk_recalc_accuracies(clk);
+
+        accuracy = __clk_get_accuracy(clk);
+        clk_prepare_unlock();
+
+        return accuracy;
+}
+
 /**
  * clk_get_accuracy - return the accuracy of clk
  * @clk: the clk whose accuracy is being returned
@@ -1064,20 +1283,15 @@ static void __clk_recalc_accuracies(struct clk *clk)
  */
 long clk_get_accuracy(struct clk *clk)
 {
-        unsigned long accuracy;
-
-        clk_prepare_lock();
-        if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
-                __clk_recalc_accuracies(clk);
-
-        accuracy = __clk_get_accuracy(clk);
-        clk_prepare_unlock();
+        if (!clk)
+                return 0;
 
-        return accuracy;
+        return clk_core_get_accuracy(clk->core);
 }
 EXPORT_SYMBOL_GPL(clk_get_accuracy);
 
-static unsigned long clk_recalc(struct clk *clk, unsigned long parent_rate)
+static unsigned long clk_recalc(struct clk_core *clk,
+                                unsigned long parent_rate)
 {
         if (clk->ops->recalc_rate)
                 return clk->ops->recalc_rate(clk->hw, parent_rate);
@@ -1098,11 +1312,11 @@ static unsigned long clk_recalc(struct clk *clk, unsigned long parent_rate)
  *
  * Caller must hold prepare_lock.
  */
-static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
+static void __clk_recalc_rates(struct clk_core *clk, unsigned long msg)
 {
         unsigned long old_rate;
         unsigned long parent_rate = 0;
-        struct clk *child;
+        struct clk_core *child;
 
         old_rate = clk->rate;
 
@@ -1122,15 +1336,7 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
                 __clk_recalc_rates(child, msg);
 }
 
-/**
- * clk_get_rate - return the rate of clk
- * @clk: the clk whose rate is being returned
- *
- * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
- * is set, which means a recalc_rate will be issued.
- * If clk is NULL then returns 0.
- */
-unsigned long clk_get_rate(struct clk *clk)
+static unsigned long clk_core_get_rate(struct clk_core *clk)
 {
         unsigned long rate;
 
@@ -1139,14 +1345,32 @@ unsigned long clk_get_rate(struct clk *clk)
         if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
                 __clk_recalc_rates(clk, 0);
 
-        rate = __clk_get_rate(clk);
+        rate = clk_core_get_rate_nolock(clk);
         clk_prepare_unlock();
 
         return rate;
 }
+EXPORT_SYMBOL_GPL(clk_core_get_rate);
+
+/**
+ * clk_get_rate - return the rate of clk
+ * @clk: the clk whose rate is being returned
+ *
+ * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
+ * is set, which means a recalc_rate will be issued.
+ * If clk is NULL then returns 0.
+ */
+unsigned long clk_get_rate(struct clk *clk)
+{
+        if (!clk)
+                return 0;
+
+        return clk_core_get_rate(clk->core);
+}
 EXPORT_SYMBOL_GPL(clk_get_rate);
 
-static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
+static int clk_fetch_parent_index(struct clk_core *clk,
+                                  struct clk_core *parent)
 {
         int i;
 
@@ -1160,7 +1384,7 @@ static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
         /*
          * find index of new parent clock using cached parent ptrs,
          * or if not yet cached, use string name comparison and cache
-         * them now to avoid future calls to __clk_lookup.
+         * them now to avoid future calls to clk_core_lookup.
          */
         for (i = 0; i < clk->num_parents; i++) {
                 if (clk->parents[i] == parent)
@@ -1170,7 +1394,7 @@ static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
                         continue;
 
                 if (!strcmp(clk->parent_names[i], parent->name)) {
-                        clk->parents[i] = __clk_lookup(parent->name);
+                        clk->parents[i] = clk_core_lookup(parent->name);
                         return i;
                 }
         }
@@ -1178,7 +1402,7 @@ static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
         return -EINVAL;
 }
 
-static void clk_reparent(struct clk *clk, struct clk *new_parent)
+static void clk_reparent(struct clk_core *clk, struct clk_core *new_parent)
 {
         hlist_del(&clk->child_node);
 
@@ -1195,10 +1419,11 @@ static void clk_reparent(struct clk *clk, struct clk *new_parent)
         clk->parent = new_parent;
 }
 
-static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
+static struct clk_core *__clk_set_parent_before(struct clk_core *clk,
+                                                struct clk_core *parent)
 {
         unsigned long flags;
-        struct clk *old_parent = clk->parent;
+        struct clk_core *old_parent = clk->parent;
 
         /*
          * Migrate prepare state between parents and prevent race with
@@ -1218,9 +1443,9 @@ static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
          * See also: Comment for clk_set_parent() below.
          */
         if (clk->prepare_count) {
-                __clk_prepare(parent);
-                clk_enable(parent);
-                clk_enable(clk);
+                clk_core_prepare(parent);
+                clk_core_enable(parent);
+                clk_core_enable(clk);
         }
 
         /* update the clk tree topology */
@@ -1231,25 +1456,27 @@ static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
         return old_parent;
 }
 
-static void __clk_set_parent_after(struct clk *clk, struct clk *parent,
-                                   struct clk *old_parent)
+static void __clk_set_parent_after(struct clk_core *core,
+                                   struct clk_core *parent,
+                                   struct clk_core *old_parent)
 {
         /*
          * Finish the migration of prepare state and undo the changes done
          * for preventing a race with clk_enable().
          */
-        if (clk->prepare_count) {
-                clk_disable(clk);
-                clk_disable(old_parent);
-                __clk_unprepare(old_parent);
+        if (core->prepare_count) {
+                clk_core_disable(core);
+                clk_core_disable(old_parent);
+                clk_core_unprepare(old_parent);
         }
 }
 
-static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
+static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent,
+                            u8 p_index)
 {
         unsigned long flags;
         int ret = 0;
-        struct clk *old_parent;
+        struct clk_core *old_parent;
 
         old_parent = __clk_set_parent_before(clk, parent);
 
@@ -1263,9 +1490,9 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
                 clk_enable_unlock(flags);
 
                 if (clk->prepare_count) {
-                        clk_disable(clk);
-                        clk_disable(parent);
-                        __clk_unprepare(parent);
+                        clk_core_disable(clk);
+                        clk_core_disable(parent);
+                        clk_core_unprepare(parent);
                 }
                 return ret;
         }
@@ -1291,9 +1518,10 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
  *
  * Caller must hold prepare_lock.
  */
-static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
+static int __clk_speculate_rates(struct clk_core *clk,
+                                 unsigned long parent_rate)
 {
-        struct clk *child;
+        struct clk_core *child;
         unsigned long new_rate;
         int ret = NOTIFY_DONE;
 
@@ -1319,10 +1547,10 @@ out:
         return ret;
 }
 
-static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
-                             struct clk *new_parent, u8 p_index)
+static void clk_calc_subtree(struct clk_core *clk, unsigned long new_rate,
+                             struct clk_core *new_parent, u8 p_index)
 {
-        struct clk *child;
+        struct clk_core *child;
 
         clk->new_rate = new_rate;
         clk->new_parent = new_parent;
@@ -1342,13 +1570,16 @@ static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
  * calculate the new rates returning the topmost clock that has to be
  * changed.
  */
-static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
+static struct clk_core *clk_calc_new_rates(struct clk_core *clk,
+                                           unsigned long rate)
 {
-        struct clk *top = clk;
-        struct clk *old_parent, *parent;
+        struct clk_core *top = clk;
+        struct clk_core *old_parent, *parent;
         struct clk_hw *parent_hw;
         unsigned long best_parent_rate = 0;
         unsigned long new_rate;
+        unsigned long min_rate;
+        unsigned long max_rate;
         int p_index = 0;
 
         /* sanity */
@@ -1360,16 +1591,22 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
         if (parent)
                 best_parent_rate = parent->rate;
 
+        clk_core_get_boundaries(clk, &min_rate, &max_rate);
+
         /* find the closest rate and parent clk/rate */
         if (clk->ops->determine_rate) {
                 parent_hw = parent ? parent->hw : NULL;
                 new_rate = clk->ops->determine_rate(clk->hw, rate,
+                                                    min_rate,
+                                                    max_rate,
                                                     &best_parent_rate,
                                                     &parent_hw);
-                parent = parent_hw ? parent_hw->clk : NULL;
+                parent = parent_hw ? parent_hw->core : NULL;
         } else if (clk->ops->round_rate) {
                 new_rate = clk->ops->round_rate(clk->hw, rate,
                                                 &best_parent_rate);
+                if (new_rate < min_rate || new_rate > max_rate)
+                        return NULL;
         } else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
                 /* pass-through clock without adjustable parent */
                 clk->new_rate = clk->rate;
@@ -1390,7 +1627,7 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
         }
 
         /* try finding the new parent index */
-        if (parent) {
+        if (parent && clk->num_parents > 1) {
                 p_index = clk_fetch_parent_index(clk, parent);
                 if (p_index < 0) {
                         pr_debug("%s: clk %s can not be parent of clk %s\n",
@@ -1414,9 +1651,10 @@ out:
  * so that in case of an error we can walk down the whole tree again and
  * abort the change.
  */
-static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
+static struct clk_core *clk_propagate_rate_change(struct clk_core *clk,
+                                                  unsigned long event)
 {
-        struct clk *child, *tmp_clk, *fail_clk = NULL;
+        struct clk_core *child, *tmp_clk, *fail_clk = NULL;
         int ret = NOTIFY_DONE;
 
         if (clk->rate == clk->new_rate)
@@ -1451,14 +1689,14 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
  * walk down a subtree and set the new rates notifying the rate
  * change on the way
  */
-static void clk_change_rate(struct clk *clk)
+static void clk_change_rate(struct clk_core *clk)
 {
-        struct clk *child;
+        struct clk_core *child;
         struct hlist_node *tmp;
         unsigned long old_rate;
         unsigned long best_parent_rate = 0;
         bool skip_set_rate = false;
-        struct clk *old_parent;
+        struct clk_core *old_parent;
 
         old_rate = clk->rate;
 
@@ -1506,6 +1744,45 @@ static void clk_change_rate(struct clk *clk)
                 clk_change_rate(clk->new_child);
 }
 
+static int clk_core_set_rate_nolock(struct clk_core *clk,
+                                    unsigned long req_rate)
+{
+        struct clk_core *top, *fail_clk;
+        unsigned long rate = req_rate;
+        int ret = 0;
+
+        if (!clk)
+                return 0;
+
+        /* bail early if nothing to do */
+        if (rate == clk_core_get_rate_nolock(clk))
+                return 0;
+
+        if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count)
+                return -EBUSY;
+
+        /* calculate new rates and get the topmost changed clock */
+        top = clk_calc_new_rates(clk, rate);
+        if (!top)
+                return -EINVAL;
+
+        /* notify that we are about to change rates */
+        fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
+        if (fail_clk) {
+                pr_debug("%s: failed to set %s rate\n", __func__,
+                                fail_clk->name);
+                clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
+                return -EBUSY;
+        }
+
+        /* change the rates */
+        clk_change_rate(top);
+
+        clk->req_rate = req_rate;
+
+        return ret;
+}
+
 /**
  * clk_set_rate - specify a new rate for clk
  * @clk: the clk whose rate is being changed
@@ -1529,8 +1806,7 @@ static void clk_change_rate(struct clk *clk)
  */
 int clk_set_rate(struct clk *clk, unsigned long rate)
 {
-        struct clk *top, *fail_clk;
-        int ret = 0;
+        int ret;
 
         if (!clk)
                 return 0;
@@ -1538,41 +1814,81 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
         /* prevent racing with updates to the clock topology */
         clk_prepare_lock();
 
-        /* bail early if nothing to do */
-        if (rate == clk_get_rate(clk))
-                goto out;
+        ret = clk_core_set_rate_nolock(clk->core, rate);
 
-        if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
-                ret = -EBUSY;
-                goto out;
-        }
+        clk_prepare_unlock();
 
-        /* calculate new rates and get the topmost changed clock */
-        top = clk_calc_new_rates(clk, rate);
-        if (!top) {
-                ret = -EINVAL;
-                goto out;
-        }
+        return ret;
+}
+EXPORT_SYMBOL_GPL(clk_set_rate);
 
-        /* notify that we are about to change rates */
-        fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
-        if (fail_clk) {
-                pr_debug("%s: failed to set %s rate\n", __func__,
-                                fail_clk->name);
-                clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
-                ret = -EBUSY;
-                goto out;
+/**
+ * clk_set_rate_range - set a rate range for a clock source
+ * @clk: clock source
+ * @min: desired minimum clock rate in Hz, inclusive
+ * @max: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
+{
+        int ret = 0;
+
+        if (!clk)
+                return 0;
+
+        if (min > max) {
+                pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
+                       __func__, clk->core->name, clk->dev_id, clk->con_id,
+                       min, max);
+                return -EINVAL;
         }
 
-        /* change the rates */
-        clk_change_rate(top);
+        clk_prepare_lock();
+
+        if (min != clk->min_rate || max != clk->max_rate) {
+                clk->min_rate = min;
+                clk->max_rate = max;
+                ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
+        }
 
-out:
         clk_prepare_unlock();
 
         return ret;
 }
-EXPORT_SYMBOL_GPL(clk_set_rate);
+EXPORT_SYMBOL_GPL(clk_set_rate_range);
+
+/**
+ * clk_set_min_rate - set a minimum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired minimum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_min_rate(struct clk *clk, unsigned long rate)
+{
+        if (!clk)
+                return 0;
+
+        return clk_set_rate_range(clk, rate, clk->max_rate);
+}
+EXPORT_SYMBOL_GPL(clk_set_min_rate);
+
+/**
+ * clk_set_max_rate - set a maximum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+        if (!clk)
+                return 0;
+
+        return clk_set_rate_range(clk, clk->min_rate, rate);
+}
+EXPORT_SYMBOL_GPL(clk_set_max_rate);
 
 /**
  * clk_get_parent - return the parent of a clk
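
The constraint API above is deliberately per-user: each handle stores its own window, clk_core_get_boundaries() intersects all of them, and clk_core_set_rate_nolock() re-resolves req_rate against the result, so dropping a handle relaxes only that user's constraint. Consumer-side sketch (the connection id is hypothetical):

        struct clk *c = devm_clk_get(dev, "baud");
        if (IS_ERR(c))
                return PTR_ERR(c);

        /* bound the rate rather than dictating it; other users'
         * windows are intersected with ours by the core */
        ret = clk_set_rate_range(c, 100000000, 200000000);
        if (ret)
                return ret;

        ret = clk_set_min_rate(c, 120000000);  /* later: tighten the floor only */
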
@@ -1599,11 +1915,11 @@ EXPORT_SYMBOL_GPL(clk_get_parent);
  *
  * For single-parent clocks without .get_parent, first check to see if the
  * .parents array exists, and if so use it to avoid an expensive tree
- * traversal. If .parents does not exist then walk the tree with __clk_lookup.
+ * traversal. If .parents does not exist then walk the tree.
  */
-static struct clk *__clk_init_parent(struct clk *clk)
+static struct clk_core *__clk_init_parent(struct clk_core *clk)
 {
-        struct clk *ret = NULL;
+        struct clk_core *ret = NULL;
         u8 index;
 
         /* handle the trivial cases */
@@ -1613,7 +1929,7 @@ static struct clk *__clk_init_parent(struct clk *clk)
 
         if (clk->num_parents == 1) {
                 if (IS_ERR_OR_NULL(clk->parent))
-                        clk->parent = __clk_lookup(clk->parent_names[0]);
+                        clk->parent = clk_core_lookup(clk->parent_names[0]);
                 ret = clk->parent;
                 goto out;
         }
@@ -1627,8 +1943,8 @@ static struct clk *__clk_init_parent(struct clk *clk)
 
         /*
          * Do our best to cache parent clocks in clk->parents. This prevents
-         * unnecessary and expensive calls to __clk_lookup. We don't set
-         * clk->parent here; that is done by the calling function
+         * unnecessary and expensive lookups. We don't set clk->parent here;
+         * that is done by the calling function.
          */
 
         index = clk->ops->get_parent(clk->hw);
@@ -1638,13 +1954,14 @@ static struct clk *__clk_init_parent(struct clk *clk)
                         kcalloc(clk->num_parents, sizeof(struct clk *),
                                 GFP_KERNEL);
 
-        ret = clk_get_parent_by_index(clk, index);
+        ret = clk_core_get_parent_by_index(clk, index);
 
 out:
         return ret;
 }
 
-void __clk_reparent(struct clk *clk, struct clk *new_parent)
+static void clk_core_reparent(struct clk_core *clk,
+                              struct clk_core *new_parent)
 {
         clk_reparent(clk, new_parent);
         __clk_recalc_accuracies(clk);
@@ -1652,23 +1969,40 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent)
 }
 
 /**
- * clk_set_parent - switch the parent of a mux clk
- * @clk: the mux clk whose input we are switching
- * @parent: the new input to clk
+ * clk_has_parent - check if a clock is a possible parent for another
+ * @clk: clock source
+ * @parent: parent clock source
  *
- * Re-parent clk to use parent as its new input source. If clk is in
- * prepared state, the clk will get enabled for the duration of this call. If
- * that's not acceptable for a specific clk (Eg: the consumer can't handle
- * that, the reparenting is glitchy in hardware, etc), use the
- * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
- *
- * After successfully changing clk's parent clk_set_parent will update the
- * clk topology, sysfs topology and propagate rate recalculation via
- * __clk_recalc_rates.
+ * This function can be used in drivers that need to check that a clock can be
+ * the parent of another without actually changing the parent.
  *
- * Returns 0 on success, -EERROR otherwise.
+ * Returns true if @parent is a possible parent for @clk, false otherwise.
  */
-int clk_set_parent(struct clk *clk, struct clk *parent)
+bool clk_has_parent(struct clk *clk, struct clk *parent)
+{
+        struct clk_core *core, *parent_core;
+        unsigned int i;
+
+        /* NULL clocks should be nops, so return success if either is NULL. */
+        if (!clk || !parent)
+                return true;
+
+        core = clk->core;
+        parent_core = parent->core;
+
+        /* Optimize for the case where the parent is already the parent. */
+        if (core->parent == parent_core)
+                return true;
+
+        for (i = 0; i < core->num_parents; i++)
+                if (strcmp(core->parent_names[i], parent_core->name) == 0)
+                        return true;
+
+        return false;
+}
+EXPORT_SYMBOL_GPL(clk_has_parent);
+
+static int clk_core_set_parent(struct clk_core *clk, struct clk_core *parent)
 {
         int ret = 0;
         int p_index = 0;
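
clk_has_parent() is a pure query: it takes no locks because it only reads parent_names, which is immutable after registration. Typical use is a guard before attempting a re-parent (handles obtained via clk_get(); names hypothetical):

        if (clk_has_parent(uart_clk, audio_pll))
                ret = clk_set_parent(uart_clk, audio_pll);
        else
                ret = clk_set_rate(uart_clk, target);  /* keep current parent */
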
@@ -1728,6 +2062,31 @@ out:
 
         return ret;
 }
+
+/**
+ * clk_set_parent - switch the parent of a mux clk
+ * @clk: the mux clk whose input we are switching
+ * @parent: the new input to clk
+ *
+ * Re-parent clk to use parent as its new input source. If clk is in
+ * prepared state, the clk will get enabled for the duration of this call. If
+ * that's not acceptable for a specific clk (Eg: the consumer can't handle
+ * that, the reparenting is glitchy in hardware, etc), use the
+ * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
+ *
+ * After successfully changing clk's parent clk_set_parent will update the
+ * clk topology, sysfs topology and propagate rate recalculation via
+ * __clk_recalc_rates.
+ *
+ * Returns 0 on success, -EERROR otherwise.
+ */
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+        if (!clk)
+                return 0;
+
+        return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
+}
 EXPORT_SYMBOL_GPL(clk_set_parent);
 
 /**
@@ -1764,13 +2123,13 @@ int clk_set_phase(struct clk *clk, int degrees)
 
         clk_prepare_lock();
 
-        if (!clk->ops->set_phase)
+        if (!clk->core->ops->set_phase)
                 goto out_unlock;
 
-        ret = clk->ops->set_phase(clk->hw, degrees);
+        ret = clk->core->ops->set_phase(clk->core->hw, degrees);
 
         if (!ret)
-                clk->phase = degrees;
+                clk->core->phase = degrees;
 
 out_unlock:
         clk_prepare_unlock();
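
A phase consumer keeps the familiar shape; e.g. an MMC host shifting its sampling clock relative to the bus clock (the handle and the 90-degree value are illustrative only):

        ret = clk_set_phase(sample_clk, 90);
        if (!ret && clk_get_phase(sample_clk) != 90)
                dev_warn(dev, "phase readback mismatch\n");
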
@@ -1778,15 +2137,9 @@ out_unlock:
 out:
         return ret;
 }
+EXPORT_SYMBOL_GPL(clk_set_phase);
 
-/**
- * clk_get_phase - return the phase shift of a clock signal
- * @clk: clock signal source
- *
- * Returns the phase shift of a clock node in degrees, otherwise returns
- * -EERROR.
- */
-int clk_get_phase(struct clk *clk)
+static int clk_core_get_phase(struct clk_core *clk)
 {
         int ret = 0;
 
@@ -1800,28 +2153,48 @@ int clk_get_phase(struct clk *clk)
 out:
         return ret;
 }
+EXPORT_SYMBOL_GPL(clk_get_phase);
+
+/**
+ * clk_get_phase - return the phase shift of a clock signal
+ * @clk: clock signal source
+ *
+ * Returns the phase shift of a clock node in degrees, otherwise returns
+ * -EERROR.
+ */
+int clk_get_phase(struct clk *clk)
+{
+        if (!clk)
+                return 0;
+
+        return clk_core_get_phase(clk->core);
+}
 
 /**
  * __clk_init - initialize the data structures in a struct clk
  * @dev: device initializing this clk, placeholder for now
  * @clk: clk being initialized
  *
- * Initializes the lists in struct clk, queries the hardware for the
+ * Initializes the lists in struct clk_core, queries the hardware for the
  * parent and rate and sets them both.
  */
-int __clk_init(struct device *dev, struct clk *clk)
+static int __clk_init(struct device *dev, struct clk *clk_user)
 {
         int i, ret = 0;
-        struct clk *orphan;
+        struct clk_core *orphan;
         struct hlist_node *tmp2;
+        struct clk_core *clk;
+        unsigned long rate;
 
-        if (!clk)
+        if (!clk_user)
                 return -EINVAL;
 
+        clk = clk_user->core;
+
         clk_prepare_lock();
 
         /* check to see if a clock with this name is already registered */
-        if (__clk_lookup(clk->name)) {
+        if (clk_core_lookup(clk->name)) {
                 pr_debug("%s: clk %s already initialized\n",
                          __func__, clk->name);
                 ret = -EEXIST;
@@ -1873,7 +2246,7 @@ int __clk_init(struct device *dev, struct clk *clk)
                 clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
                                         GFP_KERNEL);
                 /*
-                 * __clk_lookup returns NULL for parents that have not been
+                 * clk_core_lookup returns NULL for parents that have not been
                  * clk_init'd; thus any access to clk->parents[] must check
                  * for a NULL pointer. We can always perform lazy lookups for
                  * missing parents later on.
@@ -1881,7 +2254,7 @@ int __clk_init(struct device *dev, struct clk *clk)
                 if (clk->parents)
                         for (i = 0; i < clk->num_parents; i++)
                                 clk->parents[i] =
-                                        __clk_lookup(clk->parent_names[i]);
+                                        clk_core_lookup(clk->parent_names[i]);
         }
 
         clk->parent = __clk_init_parent(clk);
@@ -1936,12 +2309,13 @@ int __clk_init(struct device *dev, struct clk *clk)
          * then rate is set to zero.
          */
         if (clk->ops->recalc_rate)
-                clk->rate = clk->ops->recalc_rate(clk->hw,
-                                __clk_get_rate(clk->parent));
+                rate = clk->ops->recalc_rate(clk->hw,
+                                clk_core_get_rate_nolock(clk->parent));
         else if (clk->parent)
-                clk->rate = clk->parent->rate;
+                rate = clk->parent->rate;
         else
-                clk->rate = 0;
+                rate = 0;
+        clk->rate = clk->req_rate = rate;
 
         /*
          * walk the list of orphan clocks and reparent any that are children of
@@ -1951,13 +2325,13 @@ int __clk_init(struct device *dev, struct clk *clk)
                 if (orphan->num_parents && orphan->ops->get_parent) {
                         i = orphan->ops->get_parent(orphan->hw);
                         if (!strcmp(clk->name, orphan->parent_names[i]))
-                                __clk_reparent(orphan, clk);
+                                clk_core_reparent(orphan, clk);
                         continue;
                 }
 
                 for (i = 0; i < orphan->num_parents; i++)
                         if (!strcmp(clk->name, orphan->parent_names[i])) {
-                                __clk_reparent(orphan, clk);
+                                clk_core_reparent(orphan, clk);
                                 break;
                         }
         }
@@ -1983,47 +2357,39 @@ out:
         return ret;
 }
 
-/**
- * __clk_register - register a clock and return a cookie.
- *
- * Same as clk_register, except that the .clk field inside hw shall point to a
- * preallocated (generally statically allocated) struct clk. None of the fields
- * of the struct clk need to be initialized.
- *
- * The data pointed to by .init and .clk field shall NOT be marked as init
- * data.
- *
- * __clk_register is only exposed via clk-private.h and is intended for use with
- * very large numbers of clocks that need to be statically initialized. It is
- * a layering violation to include clk-private.h from any code which implements
- * a clock's .ops; as such any statically initialized clock data MUST be in a
- * separate C file from the logic that implements its operations. Returns 0
- * on success, otherwise an error code.
- */
-struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
+struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
+                             const char *con_id)
 {
-        int ret;
         struct clk *clk;
 
-        clk = hw->clk;
-        clk->name = hw->init->name;
-        clk->ops = hw->init->ops;
-        clk->hw = hw;
-        clk->flags = hw->init->flags;
-        clk->parent_names = hw->init->parent_names;
-        clk->num_parents = hw->init->num_parents;
-        if (dev && dev->driver)
-                clk->owner = dev->driver->owner;
-        else
-                clk->owner = NULL;
+        /* This is to allow this function to be chained to others */
+        if (!hw || IS_ERR(hw))
+                return (struct clk *) hw;
 
-        ret = __clk_init(dev, clk);
-        if (ret)
-                return ERR_PTR(ret);
+        clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+        if (!clk)
+                return ERR_PTR(-ENOMEM);
+
+        clk->core = hw->core;
+        clk->dev_id = dev_id;
+        clk->con_id = con_id;
+        clk->max_rate = ULONG_MAX;
+
+        clk_prepare_lock();
+        hlist_add_head(&clk->child_node, &hw->core->clks);
+        clk_prepare_unlock();
 
         return clk;
 }
-EXPORT_SYMBOL_GPL(__clk_register);
+
+void __clk_free_clk(struct clk *clk)
+{
+        clk_prepare_lock();
+        hlist_del(&clk->child_node);
+        clk_prepare_unlock();
+
+        kfree(clk);
+}
 
 /**
  * clk_register - allocate a new clock, register it and return an opaque cookie
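
__clk_create_clk() is what mints a per-user handle from a provider's clk_hw; the clk_get()/of_clk_get() paths (modified elsewhere in this series) pass the requester's dev_id/con_id so the rate-range error message can name the offender, and __clk_free_clk() is the error-path counterpart used before __clk_get() has taken a reference. Roughly, a lookup path does the following (sketch only; the real callers differ in detail):

        struct clk *clk = __clk_create_clk(hw, dev ? dev_name(dev) : NULL,
                                           con_id);
        if (!IS_ERR(clk) && !__clk_get(clk)) {
                __clk_free_clk(clk);
                clk = ERR_PTR(-ENOENT);
        }
        return clk;
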
@@ -2039,7 +2405,7 @@ EXPORT_SYMBOL_GPL(__clk_register);
 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
 {
         int i, ret;
-        struct clk *clk;
+        struct clk_core *clk;
 
         clk = kzalloc(sizeof(*clk), GFP_KERNEL);
         if (!clk) {
@@ -2060,7 +2426,7 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
         clk->hw = hw;
         clk->flags = hw->init->flags;
         clk->num_parents = hw->init->num_parents;
-        hw->clk = clk;
+        hw->core = clk;
 
         /* allocate local copy in case parent_names is __initdata */
         clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
@@ -2084,9 +2450,21 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
                 }
         }
 
-        ret = __clk_init(dev, clk);
+        INIT_HLIST_HEAD(&clk->clks);
+
+        hw->clk = __clk_create_clk(hw, NULL, NULL);
+        if (IS_ERR(hw->clk)) {
+                pr_err("%s: could not allocate per-user clk\n", __func__);
+                ret = PTR_ERR(hw->clk);
+                goto fail_parent_names_copy;
+        }
+
+        ret = __clk_init(dev, hw->clk);
         if (!ret)
-                return clk;
+                return hw->clk;
+
+        __clk_free_clk(hw->clk);
+        hw->clk = NULL;
 
 fail_parent_names_copy:
         while (--i >= 0)
@@ -2107,7 +2485,7 @@ EXPORT_SYMBOL_GPL(clk_register);
  */
 static void __clk_release(struct kref *ref)
 {
-	struct clk *clk = container_of(ref, struct clk, ref);
+	struct clk_core *clk = container_of(ref, struct clk_core, ref);
 	int i = clk->num_parents;
 
 	kfree(clk->parents);
@@ -2165,12 +2543,13 @@ void clk_unregister(struct clk *clk)
 	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
 		return;
 
-	clk_debug_unregister(clk);
+	clk_debug_unregister(clk->core);
 
 	clk_prepare_lock();
 
-	if (clk->ops == &clk_nodrv_ops) {
-		pr_err("%s: unregistered clock: %s\n", __func__, clk->name);
+	if (clk->core->ops == &clk_nodrv_ops) {
+		pr_err("%s: unregistered clock: %s\n", __func__,
+		       clk->core->name);
 		return;
 	}
 	/*
@@ -2178,24 +2557,25 @@ void clk_unregister(struct clk *clk)
 	 * a reference to this clock.
 	 */
 	flags = clk_enable_lock();
-	clk->ops = &clk_nodrv_ops;
+	clk->core->ops = &clk_nodrv_ops;
 	clk_enable_unlock(flags);
 
-	if (!hlist_empty(&clk->children)) {
-		struct clk *child;
+	if (!hlist_empty(&clk->core->children)) {
+		struct clk_core *child;
 		struct hlist_node *t;
 
 		/* Reparent all children to the orphan list. */
-		hlist_for_each_entry_safe(child, t, &clk->children, child_node)
-			clk_set_parent(child, NULL);
+		hlist_for_each_entry_safe(child, t, &clk->core->children,
+					  child_node)
+			clk_core_set_parent(child, NULL);
 	}
 
-	hlist_del_init(&clk->child_node);
+	hlist_del_init(&clk->core->child_node);
 
-	if (clk->prepare_count)
+	if (clk->core->prepare_count)
 		pr_warn("%s: unregistering prepared clock: %s\n",
-				__func__, clk->name);
-	kref_put(&clk->ref, __clk_release);
+				__func__, clk->core->name);
+	kref_put(&clk->core->ref, __clk_release);
 
 	clk_prepare_unlock();
 }
@@ -2263,11 +2643,13 @@ EXPORT_SYMBOL_GPL(devm_clk_unregister);
  */
 int __clk_get(struct clk *clk)
 {
-	if (clk) {
-		if (!try_module_get(clk->owner))
+	struct clk_core *core = !clk ? NULL : clk->core;
+
+	if (core) {
+		if (!try_module_get(core->owner))
 			return 0;
 
-		kref_get(&clk->ref);
+		kref_get(&core->ref);
 	}
 	return 1;
 }
@@ -2280,11 +2662,20 @@ void __clk_put(struct clk *clk)
 		return;
 
 	clk_prepare_lock();
-	owner = clk->owner;
-	kref_put(&clk->ref, __clk_release);
+
+	hlist_del(&clk->child_node);
+	if (clk->min_rate > clk->core->req_rate ||
+	    clk->max_rate < clk->core->req_rate)
+		clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
+
+	owner = clk->core->owner;
+	kref_put(&clk->core->ref, __clk_release);
+
 	clk_prepare_unlock();
 
 	module_put(owner);
+
+	kfree(clk);
 }
 
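Because __clk_put() now frees the handle and re-applies the surviving users' rate constraints, two clk_get() calls on the same hardware clock return two distinct handles backed by one clk_core. A minimal consumer sketch of the new semantics (the device and connection id here are hypothetical):

	struct clk *a = clk_get(dev, "bus");	/* first per-user handle */
	struct clk *b = clk_get(dev, "bus");	/* second, distinct handle */

	/* a != b, yet both resolve to the same underlying clk_core */
	clk_put(a);				/* frees only a; b's constraints stay in force */
	clk_put(b);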
 /*** clk rate change notifiers ***/
@@ -2339,7 +2730,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
 
 	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
 
-	clk->notifier_count++;
+	clk->core->notifier_count++;
 
 out:
 	clk_prepare_unlock();
@@ -2376,7 +2767,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
 	if (cn->clk == clk) {
 		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
 
-		clk->notifier_count--;
+		clk->core->notifier_count--;
 
 		/* XXX the notifier code should handle this better */
 		if (!cn->notifier_head.head) {
@@ -2506,7 +2897,8 @@ void of_clk_del_provider(struct device_node *np)
 }
 EXPORT_SYMBOL_GPL(of_clk_del_provider);
 
-struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
+struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
+				       const char *dev_id, const char *con_id)
 {
 	struct of_clk_provider *provider;
 	struct clk *clk = ERR_PTR(-EPROBE_DEFER);
@@ -2515,8 +2907,17 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
 	list_for_each_entry(provider, &of_clk_providers, link) {
 		if (provider->node == clkspec->np)
 			clk = provider->get(clkspec, provider->data);
-		if (!IS_ERR(clk))
+		if (!IS_ERR(clk)) {
+			clk = __clk_create_clk(__clk_get_hw(clk), dev_id,
+					       con_id);
+
+			if (!IS_ERR(clk) && !__clk_get(clk)) {
+				__clk_free_clk(clk);
+				clk = ERR_PTR(-ENOENT);
+			}
+
 			break;
+		}
 	}
 
 	return clk;
@@ -2527,7 +2928,7 @@ struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
 	struct clk *clk;
 
 	mutex_lock(&of_clk_mutex);
-	clk = __of_clk_get_from_provider(clkspec);
+	clk = __of_clk_get_from_provider(clkspec, NULL, __func__);
 	mutex_unlock(&of_clk_mutex);
 
 	return clk;
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h
index c798138f023f..ba845408cc3e 100644
--- a/drivers/clk/clk.h
+++ b/drivers/clk/clk.h
@@ -9,9 +9,31 @@
  * published by the Free Software Foundation.
  */
 
+struct clk_hw;
+
 #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
 struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec);
-struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec);
+struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
+				       const char *dev_id, const char *con_id);
 void of_clk_lock(void);
 void of_clk_unlock(void);
 #endif
+
+#ifdef CONFIG_COMMON_CLK
+struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
+			     const char *con_id);
+void __clk_free_clk(struct clk *clk);
+#else
+/* All these casts to avoid ifdefs in clkdev... */
+static inline struct clk *
+__clk_create_clk(struct clk_hw *hw, const char *dev_id, const char *con_id)
+{
+	return (struct clk *)hw;
+}
+static inline void __clk_free_clk(struct clk *clk) { }
+static inline struct clk_hw *__clk_get_hw(struct clk *clk)
+{
+	return (struct clk_hw *)clk;
+}
+
+#endif
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index da4bda8b7fc7..043fd3633373 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -19,6 +19,7 @@
 #include <linux/mutex.h>
 #include <linux/clk.h>
 #include <linux/clkdev.h>
+#include <linux/clk-provider.h>
 #include <linux/of.h>
 
 #include "clk.h"
@@ -28,6 +29,20 @@ static DEFINE_MUTEX(clocks_mutex);
 
 #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
 
+static struct clk *__of_clk_get_by_clkspec(struct of_phandle_args *clkspec,
+					   const char *dev_id, const char *con_id)
+{
+	struct clk *clk;
+
+	if (!clkspec)
+		return ERR_PTR(-EINVAL);
+
+	of_clk_lock();
+	clk = __of_clk_get_from_provider(clkspec, dev_id, con_id);
+	of_clk_unlock();
+	return clk;
+}
+
 /**
  * of_clk_get_by_clkspec() - Lookup a clock from a clock provider
  * @clkspec: pointer to a clock specifier data structure
@@ -38,22 +53,11 @@ static DEFINE_MUTEX(clocks_mutex);
  */
 struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec)
 {
-	struct clk *clk;
-
-	if (!clkspec)
-		return ERR_PTR(-EINVAL);
-
-	of_clk_lock();
-	clk = __of_clk_get_from_provider(clkspec);
-
-	if (!IS_ERR(clk) && !__clk_get(clk))
-		clk = ERR_PTR(-ENOENT);
-
-	of_clk_unlock();
-	return clk;
+	return __of_clk_get_by_clkspec(clkspec, NULL, __func__);
 }
 
-struct clk *of_clk_get(struct device_node *np, int index)
+static struct clk *__of_clk_get(struct device_node *np, int index,
+				const char *dev_id, const char *con_id)
 {
 	struct of_phandle_args clkspec;
 	struct clk *clk;
@@ -67,22 +71,21 @@ struct clk *of_clk_get(struct device_node *np, int index)
 	if (rc)
 		return ERR_PTR(rc);
 
-	clk = of_clk_get_by_clkspec(&clkspec);
+	clk = __of_clk_get_by_clkspec(&clkspec, dev_id, con_id);
 	of_node_put(clkspec.np);
+
 	return clk;
 }
+
+struct clk *of_clk_get(struct device_node *np, int index)
+{
+	return __of_clk_get(np, index, np->full_name, NULL);
+}
 EXPORT_SYMBOL(of_clk_get);
 
-/**
- * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
- * @np: pointer to clock consumer node
- * @name: name of consumer's clock input, or NULL for the first clock reference
- *
- * This function parses the clocks and clock-names properties,
- * and uses them to look up the struct clk from the registered list of clock
- * providers.
- */
-struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
+static struct clk *__of_clk_get_by_name(struct device_node *np,
+					const char *dev_id,
+					const char *name)
 {
 	struct clk *clk = ERR_PTR(-ENOENT);
 
@@ -97,10 +100,10 @@ struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
 	 */
 	if (name)
 		index = of_property_match_string(np, "clock-names", name);
-	clk = of_clk_get(np, index);
-	if (!IS_ERR(clk))
+	clk = __of_clk_get(np, index, dev_id, name);
+	if (!IS_ERR(clk)) {
 		break;
-	else if (name && index >= 0) {
+	} else if (name && index >= 0) {
 		if (PTR_ERR(clk) != -EPROBE_DEFER)
 			pr_err("ERROR: could not get clock %s:%s(%i)\n",
 				np->full_name, name ? name : "", index);
@@ -119,7 +122,33 @@ struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
 
 	return clk;
 }
+
+/**
+ * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
+ * @np: pointer to clock consumer node
+ * @name: name of consumer's clock input, or NULL for the first clock reference
+ *
+ * This function parses the clocks and clock-names properties,
+ * and uses them to look up the struct clk from the registered list of clock
+ * providers.
+ */
+struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
+{
+	if (!np)
+		return ERR_PTR(-ENOENT);
+
+	return __of_clk_get_by_name(np, np->full_name, name);
+}
 EXPORT_SYMBOL(of_clk_get_by_name);
+
+#else /* defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) */
+
+static struct clk *__of_clk_get_by_name(struct device_node *np,
+					const char *dev_id,
+					const char *name)
+{
+	return ERR_PTR(-ENOENT);
+}
 #endif
 
 /*
@@ -168,14 +197,28 @@ static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
 struct clk *clk_get_sys(const char *dev_id, const char *con_id)
 {
 	struct clk_lookup *cl;
+	struct clk *clk = NULL;
 
 	mutex_lock(&clocks_mutex);
+
 	cl = clk_find(dev_id, con_id);
-	if (cl && !__clk_get(cl->clk))
+	if (!cl)
+		goto out;
+
+	clk = __clk_create_clk(__clk_get_hw(cl->clk), dev_id, con_id);
+	if (IS_ERR(clk))
+		goto out;
+
+	if (!__clk_get(clk)) {
+		__clk_free_clk(clk);
 		cl = NULL;
+		goto out;
+	}
+
+out:
 	mutex_unlock(&clocks_mutex);
 
-	return cl ? cl->clk : ERR_PTR(-ENOENT);
+	return cl ? clk : ERR_PTR(-ENOENT);
 }
 EXPORT_SYMBOL(clk_get_sys);
 
@@ -185,10 +228,8 @@ struct clk *clk_get(struct device *dev, const char *con_id)
 	struct clk *clk;
 
 	if (dev) {
-		clk = of_clk_get_by_name(dev->of_node, con_id);
-		if (!IS_ERR(clk))
-			return clk;
-		if (PTR_ERR(clk) == -EPROBE_DEFER)
+		clk = __of_clk_get_by_name(dev->of_node, dev_id, con_id);
+		if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
 			return clk;
 	}
 
@@ -331,6 +372,7 @@ int clk_register_clkdev(struct clk *clk, const char *con_id,
 
 	return 0;
 }
+EXPORT_SYMBOL(clk_register_clkdev);
 
 /**
  * clk_register_clkdevs - register a set of clk_lookup for a struct clk
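The newly exported clk_register_clkdev() lets modular providers publish lookups the same way built-in code already does. A minimal sketch, with hypothetical clock and device names:

	static int example_clk_init(void)
	{
		struct clk *clk;

		clk = clk_register_fixed_rate(NULL, "example_osc", NULL,
					      CLK_IS_ROOT, 19200000);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* afterwards clk_get("example-dev", "ref") finds this clock */
		return clk_register_clkdev(clk, "ref", "example-dev");
	}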
diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
index 007144f81f50..2e4f6d432beb 100644
--- a/drivers/clk/hisilicon/clk-hi3620.c
+++ b/drivers/clk/hisilicon/clk-hi3620.c
@@ -295,6 +295,8 @@ static unsigned long mmc_clk_recalc_rate(struct clk_hw *hw,
 }
 
 static long mmc_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
+				   unsigned long min_rate,
+				   unsigned long max_rate,
 				   unsigned long *best_parent_rate,
 				   struct clk_hw **best_parent_p)
 {
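The same two extra parameters are threaded through every .determine_rate implementation in this merge: the core now passes the aggregated per-user floor and ceiling so a provider can pick a rate inside [min_rate, max_rate]. A minimal sketch of an implementation honouring them (illustrative only; parent selection is elided, and the drivers in this diff mostly just forward the arguments):

	static long example_determine_rate(struct clk_hw *hw, unsigned long rate,
					   unsigned long min_rate,
					   unsigned long max_rate,
					   unsigned long *best_parent_rate,
					   struct clk_hw **best_parent_p)
	{
		/* clamp the request into the aggregated per-user range */
		if (rate < min_rate)
			rate = min_rate;
		if (rate > max_rate)
			rate = max_rate;

		return rate;
	}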
diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c
index 48fa53c7ce5e..de6a873175d2 100644
--- a/drivers/clk/mmp/clk-mix.c
+++ b/drivers/clk/mmp/clk-mix.c
@@ -202,6 +202,8 @@ error:
 }
 
 static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate,
+				       unsigned long min_rate,
+				       unsigned long max_rate,
 				       unsigned long *best_parent_rate,
 				       struct clk_hw **best_parent_clk)
 {
diff --git a/drivers/clk/pxa/Makefile b/drivers/clk/pxa/Makefile
index 38e915344605..38e37bf6b821 100644
--- a/drivers/clk/pxa/Makefile
+++ b/drivers/clk/pxa/Makefile
@@ -1,3 +1,4 @@
 obj-y += clk-pxa.o
 obj-$(CONFIG_PXA25x) += clk-pxa25x.o
 obj-$(CONFIG_PXA27x) += clk-pxa27x.o
+obj-$(CONFIG_PXA3xx) += clk-pxa3xx.o
diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c
index 4e834753ab09..29cee9e8d4d9 100644
--- a/drivers/clk/pxa/clk-pxa.c
+++ b/drivers/clk/pxa/clk-pxa.c
@@ -46,7 +46,7 @@ static unsigned long cken_recalc_rate(struct clk_hw *hw,
 		fix = &pclk->lp;
 	else
 		fix = &pclk->hp;
-	fix->hw.clk = hw->clk;
+	__clk_hw_set_clk(&fix->hw, hw);
 	return clk_fixed_factor_ops.recalc_rate(&fix->hw, parent_rate);
 }
 
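__clk_hw_set_clk() is assumed here to be the small clk-provider helper introduced alongside the per-user clk rework; the shape this call site implies is roughly:

	static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
	{
		/* borrow both the per-user handle and the shared core */
		dst->clk = src->clk;
		dst->core = src->core;
	}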
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
new file mode 100644
index 000000000000..39f891bba09a
--- /dev/null
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -0,0 +1,364 @@
1/*
2 * Marvell PXA3xxx family clocks
3 *
4 * Copyright (C) 2014 Robert Jarzmik
5 *
6 * Heavily inspired from former arch/arm/mach-pxa/pxa3xx.c
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * For non-devicetree platforms. Once pxa is fully converted to devicetree, this
13 * should go away.
14 */
15#include <linux/io.h>
16#include <linux/clk.h>
17#include <linux/clk-provider.h>
18#include <linux/clkdev.h>
19#include <linux/of.h>
20#include <mach/smemc.h>
21#include <mach/pxa3xx-regs.h>
22
23#include <dt-bindings/clock/pxa-clock.h>
24#include "clk-pxa.h"
25
26#define KHz 1000
27#define MHz (1000 * 1000)
28
29enum {
30 PXA_CORE_60Mhz = 0,
31 PXA_CORE_RUN,
32 PXA_CORE_TURBO,
33};
34
35enum {
36 PXA_BUS_60Mhz = 0,
37 PXA_BUS_HSS,
38};
39
40/* crystal frequency to HSIO bus frequency multiplier (HSS) */
41static unsigned char hss_mult[4] = { 8, 12, 16, 24 };
42
43/* crystal frequency to static memory controller multiplier (SMCFS) */
44static unsigned int smcfs_mult[8] = { 6, 0, 8, 0, 0, 16, };
45static unsigned int df_clkdiv[4] = { 1, 2, 4, 1 };
46
47static const char * const get_freq_khz[] = {
48 "core", "ring_osc_60mhz", "run", "cpll", "system_bus"
49};
50
51/*
52 * Get the clock frequency as reflected by ACSR and the turbo flag.
53 * We assume these values have been applied via an fcs (frequency change sequence).
54 * If info is not 0 we also display the current settings.
55 */
56unsigned int pxa3xx_get_clk_frequency_khz(int info)
57{
58 struct clk *clk;
59 unsigned long clks[5];
60 int i;
61
62 for (i = 0; i < 5; i++) {
63 clk = clk_get(NULL, get_freq_khz[i]);
64 if (IS_ERR(clk)) {
65 clks[i] = 0;
66 } else {
67 clks[i] = clk_get_rate(clk);
68 clk_put(clk);
69 }
70 }
71 if (info) {
72 pr_info("RO Mode clock: %ld.%02ldMHz\n",
73 clks[1] / 1000000, (clks[1] % 1000000) / 10000);
74 pr_info("Run Mode clock: %ld.%02ldMHz\n",
75 clks[2] / 1000000, (clks[2] % 1000000) / 10000);
76 pr_info("Turbo Mode clock: %ld.%02ldMHz\n",
77 clks[3] / 1000000, (clks[3] % 1000000) / 10000);
78 pr_info("System bus clock: %ld.%02ldMHz\n",
79 clks[4] / 1000000, (clks[4] % 1000000) / 10000);
80 }
81 return (unsigned int)clks[0] / KHz;
82}
83
84static unsigned long clk_pxa3xx_ac97_get_rate(struct clk_hw *hw,
85 unsigned long parent_rate)
86{
87 unsigned long ac97_div, rate;
88
89 ac97_div = AC97_DIV;
90
91 /* This may lose precision for some rates but won't for the
92 * standard 24.576MHz.
93 */
94 rate = parent_rate / 2;
95 rate /= ((ac97_div >> 12) & 0x7fff);
96 rate *= (ac97_div & 0xfff);
97
98 return rate;
99}
100PARENTS(clk_pxa3xx_ac97) = { "spll_624mhz" };
101RATE_RO_OPS(clk_pxa3xx_ac97, "ac97");
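To see why 24.576 MHz survives the integer arithmetic above: parent_rate / 2 is 312 MHz off the 624 MHz PLL, and a divider/multiplier pair such as n = 1625, m = 128 (illustrative values, not read from the AC97_DIV register) divides evenly at every step:

	rate = 624000000 / 2;	/* 312000000 */
	rate /= 1625;		/* 192000, exact: 1625 * 192000 = 312000000 */
	rate *= 128;		/* 24576000 = 24.576 MHz */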
102
103static unsigned long clk_pxa3xx_smemc_get_rate(struct clk_hw *hw,
104 unsigned long parent_rate)
105{
106 unsigned long acsr = ACSR;
107 unsigned long memclkcfg = __raw_readl(MEMCLKCFG);
108
109 return (parent_rate / 48) * smcfs_mult[(acsr >> 23) & 0x7] /
110 df_clkdiv[(memclkcfg >> 16) & 0x3];
111}
112PARENTS(clk_pxa3xx_smemc) = { "spll_624mhz" };
113RATE_RO_OPS(clk_pxa3xx_smemc, "smemc");
114
115static bool pxa3xx_is_ring_osc_forced(void)
116{
117 unsigned long acsr = ACSR;
118
119 return acsr & ACCR_D0CS;
120}
121
122PARENTS(pxa3xx_pbus) = { "ring_osc_60mhz", "spll_624mhz" };
123PARENTS(pxa3xx_32Khz_bus) = { "osc_32_768khz", "osc_32_768khz" };
124PARENTS(pxa3xx_13MHz_bus) = { "osc_13mhz", "osc_13mhz" };
125PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
126PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
127PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
128
129#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
130#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
131 div_hp, bit, is_lp, flags) \
132 PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp, \
133 mult_hp, div_hp, is_lp, CKEN_AB(bit), \
134 (CKEN_ ## bit % 32), flags)
135#define PXA3XX_PBUS_CKEN(dev_id, con_id, bit, mult_lp, div_lp, \
136 mult_hp, div_hp, delay) \
137 PXA3XX_CKEN(dev_id, con_id, pxa3xx_pbus_parents, mult_lp, \
138 div_lp, mult_hp, div_hp, bit, pxa3xx_is_ring_osc_forced, 0)
139#define PXA3XX_CKEN_1RATE(dev_id, con_id, bit, parents) \
140 PXA_CKEN_1RATE(dev_id, con_id, bit, parents, \
141 CKEN_AB(bit), (CKEN_ ## bit % 32), 0)
142
143static struct desc_clk_cken pxa3xx_clocks[] __initdata = {
144 PXA3XX_PBUS_CKEN("pxa2xx-uart.0", NULL, FFUART, 1, 4, 1, 42, 1),
145 PXA3XX_PBUS_CKEN("pxa2xx-uart.1", NULL, BTUART, 1, 4, 1, 42, 1),
146 PXA3XX_PBUS_CKEN("pxa2xx-uart.2", NULL, STUART, 1, 4, 1, 42, 1),
147 PXA3XX_PBUS_CKEN("pxa2xx-i2c.0", NULL, I2C, 2, 5, 1, 19, 0),
148 PXA3XX_PBUS_CKEN("pxa27x-udc", NULL, UDC, 1, 4, 1, 13, 5),
149 PXA3XX_PBUS_CKEN("pxa27x-ohci", NULL, USBH, 1, 4, 1, 13, 0),
150 PXA3XX_PBUS_CKEN("pxa3xx-u2d", NULL, USB2, 1, 4, 1, 13, 0),
151 PXA3XX_PBUS_CKEN("pxa27x-pwm.0", NULL, PWM0, 1, 6, 1, 48, 0),
152 PXA3XX_PBUS_CKEN("pxa27x-pwm.1", NULL, PWM1, 1, 6, 1, 48, 0),
153 PXA3XX_PBUS_CKEN("pxa2xx-mci.0", NULL, MMC1, 1, 4, 1, 24, 0),
154 PXA3XX_PBUS_CKEN("pxa2xx-mci.1", NULL, MMC2, 1, 4, 1, 24, 0),
155 PXA3XX_PBUS_CKEN("pxa2xx-mci.2", NULL, MMC3, 1, 4, 1, 24, 0),
156
157 PXA3XX_CKEN_1RATE("pxa27x-keypad", NULL, KEYPAD,
158 pxa3xx_32Khz_bus_parents),
159 PXA3XX_CKEN_1RATE("pxa3xx-ssp.0", NULL, SSP1, pxa3xx_13MHz_bus_parents),
160 PXA3XX_CKEN_1RATE("pxa3xx-ssp.1", NULL, SSP2, pxa3xx_13MHz_bus_parents),
161 PXA3XX_CKEN_1RATE("pxa3xx-ssp.2", NULL, SSP3, pxa3xx_13MHz_bus_parents),
162 PXA3XX_CKEN_1RATE("pxa3xx-ssp.3", NULL, SSP4, pxa3xx_13MHz_bus_parents),
163
164 PXA3XX_CKEN(NULL, "AC97CLK", pxa3xx_ac97_bus_parents, 1, 4, 1, 1, AC97,
165 pxa3xx_is_ring_osc_forced, 0),
166 PXA3XX_CKEN(NULL, "CAMCLK", pxa3xx_sbus_parents, 1, 2, 1, 1, CAMERA,
167 pxa3xx_is_ring_osc_forced, 0),
168 PXA3XX_CKEN("pxa2xx-fb", NULL, pxa3xx_sbus_parents, 1, 1, 1, 1, LCD,
169 pxa3xx_is_ring_osc_forced, 0),
170 PXA3XX_CKEN("pxa2xx-pcmcia", NULL, pxa3xx_smemcbus_parents, 1, 4,
171 1, 1, SMC, pxa3xx_is_ring_osc_forced, CLK_IGNORE_UNUSED),
172};
173
174static struct desc_clk_cken pxa300_310_clocks[] __initdata = {
175
176 PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA300_GCU, 1, 1, 1, 1, 0),
177 PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 4, 0),
178 PXA3XX_CKEN_1RATE("pxa3xx-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents),
179};
180
181static struct desc_clk_cken pxa320_clocks[] __initdata = {
182 PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 6, 0),
183 PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA320_GCU, 1, 1, 1, 1, 0),
184 PXA3XX_CKEN_1RATE("pxa3xx-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents),
185};
186
187static struct desc_clk_cken pxa93x_clocks[] __initdata = {
188
189 PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA300_GCU, 1, 1, 1, 1, 0),
190 PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 4, 0),
191 PXA3XX_CKEN_1RATE("pxa93x-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents),
192};
193
194static unsigned long clk_pxa3xx_system_bus_get_rate(struct clk_hw *hw,
195 unsigned long parent_rate)
196{
197 unsigned long acsr = ACSR;
198 unsigned int hss = (acsr >> 14) & 0x3;
199
200 if (pxa3xx_is_ring_osc_forced())
201 return parent_rate;
202 return parent_rate / 48 * hss_mult[hss];
203}
204
205static u8 clk_pxa3xx_system_bus_get_parent(struct clk_hw *hw)
206{
207 if (pxa3xx_is_ring_osc_forced())
208 return PXA_BUS_60Mhz;
209 else
210 return PXA_BUS_HSS;
211}
212
213PARENTS(clk_pxa3xx_system_bus) = { "ring_osc_60mhz", "spll_624mhz" };
214MUX_RO_RATE_RO_OPS(clk_pxa3xx_system_bus, "system_bus");
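Plugging the hss_mult table into the rate computation above: spll_624mhz / 48 is the 13 MHz crystal step, so the four HSS settings select the familiar PXA3xx HSIO bus rates:

	/* parent_rate / 48 * hss_mult[hss], parent = spll_624mhz */
	/*   hss = 0:  13 MHz *  8 = 104 MHz */
	/*   hss = 1:  13 MHz * 12 = 156 MHz */
	/*   hss = 2:  13 MHz * 16 = 208 MHz */
	/*   hss = 3:  13 MHz * 24 = 312 MHz */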
215
216static unsigned long clk_pxa3xx_core_get_rate(struct clk_hw *hw,
217 unsigned long parent_rate)
218{
219 return parent_rate;
220}
221
222static u8 clk_pxa3xx_core_get_parent(struct clk_hw *hw)
223{
224 unsigned long xclkcfg;
225 unsigned int t;
226
227 if (pxa3xx_is_ring_osc_forced())
228 return PXA_CORE_60Mhz;
229
230 /* Read XCLKCFG register turbo bit */
231 __asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg));
232 t = xclkcfg & 0x1;
233
234 if (t)
235 return PXA_CORE_TURBO;
236 return PXA_CORE_RUN;
237}
238PARENTS(clk_pxa3xx_core) = { "ring_osc_60mhz", "run", "cpll" };
239MUX_RO_RATE_RO_OPS(clk_pxa3xx_core, "core");
240
241static unsigned long clk_pxa3xx_run_get_rate(struct clk_hw *hw,
242 unsigned long parent_rate)
243{
244 unsigned long acsr = ACSR;
245 unsigned int xn = (acsr & ACCR_XN_MASK) >> 8;
246 unsigned int t, xclkcfg;
247
248 /* Read XCLKCFG register turbo bit */
249 __asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg));
250 t = xclkcfg & 0x1;
251
252 return t ? (parent_rate / xn) * 2 : parent_rate;
253}
254PARENTS(clk_pxa3xx_run) = { "cpll" };
255RATE_RO_OPS(clk_pxa3xx_run, "run");
256
257static unsigned long clk_pxa3xx_cpll_get_rate(struct clk_hw *hw,
258 unsigned long parent_rate)
259{
260 unsigned long acsr = ACSR;
261 unsigned int xn = (acsr & ACCR_XN_MASK) >> 8;
262 unsigned int xl = acsr & ACCR_XL_MASK;
263 unsigned int t, xclkcfg;
264
265 /* Read XCLKCFG register turbo bit */
266 __asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg));
267 t = xclkcfg & 0x1;
268
269 pr_debug("%s: parent_rate=%lu, xl=%u, xn=%u\n", __func__, parent_rate, xl, xn);
270 return t ? parent_rate * xl * xn : parent_rate * xl;
271}
272PARENTS(clk_pxa3xx_cpll) = { "osc_13mhz" };
273RATE_RO_OPS(clk_pxa3xx_cpll, "cpll");
274
275static void __init pxa3xx_register_core(void)
276{
277 clk_register_clk_pxa3xx_cpll();
278 clk_register_clk_pxa3xx_run();
279
280 clkdev_pxa_register(CLK_CORE, "core", NULL,
281 clk_register_clk_pxa3xx_core());
282}
283
284static void __init pxa3xx_register_plls(void)
285{
286 clk_register_fixed_rate(NULL, "osc_13mhz", NULL,
287 CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
288 13 * MHz);
289 clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
290 CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
291 32768);
292 clk_register_fixed_rate(NULL, "ring_osc_120mhz", NULL,
293 CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
294 120 * MHz);
295 clk_register_fixed_rate(NULL, "clk_dummy", NULL, CLK_IS_ROOT, 0);
296 clk_register_fixed_factor(NULL, "spll_624mhz", "osc_13mhz", 0, 48, 1);
297 clk_register_fixed_factor(NULL, "ring_osc_60mhz", "ring_osc_120mhz",
298 0, 1, 2);
299}
300
301#define DUMMY_CLK(_con_id, _dev_id, _parent) \
302 { .con_id = _con_id, .dev_id = _dev_id, .parent = _parent }
303struct dummy_clk {
304 const char *con_id;
305 const char *dev_id;
306 const char *parent;
307};
308static struct dummy_clk dummy_clks[] __initdata = {
309 DUMMY_CLK(NULL, "pxa93x-gpio", "osc_13mhz"),
310 DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
311 DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
312 DUMMY_CLK(NULL, "pxa3xx-pwri2c.1", "osc_13mhz"),
313};
314
315static void __init pxa3xx_dummy_clocks_init(void)
316{
317 struct clk *clk;
318 struct dummy_clk *d;
319 const char *name;
320 int i;
321
322 for (i = 0; i < ARRAY_SIZE(dummy_clks); i++) {
323 d = &dummy_clks[i];
324 name = d->dev_id ? d->dev_id : d->con_id;
325 clk = clk_register_fixed_factor(NULL, name, d->parent, 0, 1, 1);
326 clk_register_clkdev(clk, d->con_id, d->dev_id);
327 }
328}
329
330static void __init pxa3xx_base_clocks_init(void)
331{
332 pxa3xx_register_plls();
333 pxa3xx_register_core();
334 clk_register_clk_pxa3xx_system_bus();
335 clk_register_clk_pxa3xx_ac97();
336 clk_register_clk_pxa3xx_smemc();
337 clk_register_gate(NULL, "CLK_POUT", "osc_13mhz", 0,
338 (void __iomem *)&OSCC, 11, 0, NULL);
339}
340
341int __init pxa3xx_clocks_init(void)
342{
343 int ret;
344
345 pxa3xx_base_clocks_init();
346 pxa3xx_dummy_clocks_init();
347 ret = clk_pxa_cken_init(pxa3xx_clocks, ARRAY_SIZE(pxa3xx_clocks));
348 if (ret)
349 return ret;
350 if (cpu_is_pxa320())
351 return clk_pxa_cken_init(pxa320_clocks,
352 ARRAY_SIZE(pxa320_clocks));
353 if (cpu_is_pxa300() || cpu_is_pxa310())
354 return clk_pxa_cken_init(pxa300_310_clocks,
355 ARRAY_SIZE(pxa300_310_clocks));
356 return clk_pxa_cken_init(pxa93x_clocks, ARRAY_SIZE(pxa93x_clocks));
357}
358
359static void __init pxa3xx_dt_clocks_init(struct device_node *np)
360{
361 pxa3xx_clocks_init();
362 clk_pxa_dt_common_init(np);
363}
364CLK_OF_DECLARE(pxa_clks, "marvell,pxa300-clocks", pxa3xx_dt_clocks_init);
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 1107351ed346..0d7ab52b7ab0 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -29,6 +29,15 @@ config IPQ_GCC_806X
 	  Say Y if you want to use peripheral devices such as UART, SPI,
 	  i2c, USB, SD/eMMC, etc.
 
+config IPQ_LCC_806X
+	tristate "IPQ806x LPASS Clock Controller"
+	select IPQ_GCC_806X
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the LPASS clock controller on ipq806x devices.
+	  Say Y if you want to use audio devices such as i2s, pcm,
+	  S/PDIF, etc.
+
 config MSM_GCC_8660
 	tristate "MSM8660 Global Clock Controller"
 	depends on COMMON_CLK_QCOM
@@ -45,6 +54,15 @@ config MSM_GCC_8960
 	  Say Y if you want to use peripheral devices such as UART, SPI,
 	  i2c, USB, SD/eMMC, SATA, PCIe, etc.
 
+config MSM_LCC_8960
+	tristate "APQ8064/MSM8960 LPASS Clock Controller"
+	select MSM_GCC_8960
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the LPASS clock controller on apq8064/msm8960 devices.
+	  Say Y if you want to use audio devices such as i2s, pcm,
+	  SLIMBus, etc.
+
 config MSM_MMCC_8960
 	tristate "MSM8960 Multimedia Clock Controller"
 	select MSM_GCC_8960
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 783cfb24faa4..617826469595 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -6,13 +6,17 @@ clk-qcom-y += clk-pll.o
 clk-qcom-y += clk-rcg.o
 clk-qcom-y += clk-rcg2.o
 clk-qcom-y += clk-branch.o
+clk-qcom-y += clk-regmap-divider.o
+clk-qcom-y += clk-regmap-mux.o
 clk-qcom-y += reset.o
 
 obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
 obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
 obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
+obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
 obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
 obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
+obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
 obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
 obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c
index 60873a7f45d9..b4325f65a1bf 100644
--- a/drivers/clk/qcom/clk-pll.c
+++ b/drivers/clk/qcom/clk-pll.c
@@ -141,6 +141,7 @@ struct pll_freq_tbl *find_freq(const struct pll_freq_tbl *f, unsigned long rate)
 
 static long
 clk_pll_determine_rate(struct clk_hw *hw, unsigned long rate,
+		       unsigned long min_rate, unsigned long max_rate,
 		       unsigned long *p_rate, struct clk_hw **p)
 {
 	struct clk_pll *pll = to_clk_pll(hw);
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index 0b93972c8807..0039bd7d3965 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -368,6 +368,7 @@ clk_dyn_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
 
 static long _freq_tbl_determine_rate(struct clk_hw *hw,
 		const struct freq_tbl *f, unsigned long rate,
+		unsigned long min_rate, unsigned long max_rate,
 		unsigned long *p_rate, struct clk_hw **p_hw)
 {
 	unsigned long clk_flags;
@@ -397,22 +398,27 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
 }
 
 static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long min_rate, unsigned long max_rate,
 		unsigned long *p_rate, struct clk_hw **p)
 {
 	struct clk_rcg *rcg = to_clk_rcg(hw);
 
-	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p);
+	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate,
+					max_rate, p_rate, p);
 }
 
 static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long min_rate, unsigned long max_rate,
 		unsigned long *p_rate, struct clk_hw **p)
 {
 	struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
 
-	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p);
+	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate,
+					max_rate, p_rate, p);
 }
 
 static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long min_rate, unsigned long max_rate,
 		unsigned long *p_rate, struct clk_hw **p_hw)
 {
 	struct clk_rcg *rcg = to_clk_rcg(hw);
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 08b8b3729f53..742acfa18d63 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -208,6 +208,7 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
 }
 
 static long clk_rcg2_determine_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long min_rate, unsigned long max_rate,
 		unsigned long *p_rate, struct clk_hw **p)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -361,6 +362,8 @@ static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
 }
 
 static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
+					 unsigned long min_rate,
+					 unsigned long max_rate,
 					 unsigned long *p_rate, struct clk_hw **p)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -412,6 +415,7 @@ const struct clk_ops clk_edp_pixel_ops = {
 EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
 
 static long clk_byte_determine_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long min_rate, unsigned long max_rate,
 		unsigned long *p_rate, struct clk_hw **p_hw)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -476,6 +480,8 @@ static const struct frac_entry frac_table_pixel[] = {
 };
 
 static long clk_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
+				     unsigned long min_rate,
+				     unsigned long max_rate,
 				     unsigned long *p_rate, struct clk_hw **p)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c
new file mode 100644
index 000000000000..53484912301e
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-divider.c
@@ -0,0 +1,70 @@
1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/kernel.h>
15#include <linux/bitops.h>
16#include <linux/regmap.h>
17#include <linux/export.h>
18
19#include "clk-regmap-divider.h"
20
21static inline struct clk_regmap_div *to_clk_regmap_div(struct clk_hw *hw)
22{
23 return container_of(to_clk_regmap(hw), struct clk_regmap_div, clkr);
24}
25
26static long div_round_rate(struct clk_hw *hw, unsigned long rate,
27 unsigned long *prate)
28{
29 struct clk_regmap_div *divider = to_clk_regmap_div(hw);
30
31 return divider_round_rate(hw, rate, prate, NULL, divider->width,
32 CLK_DIVIDER_ROUND_CLOSEST);
33}
34
35static int div_set_rate(struct clk_hw *hw, unsigned long rate,
36 unsigned long parent_rate)
37{
38 struct clk_regmap_div *divider = to_clk_regmap_div(hw);
39 struct clk_regmap *clkr = &divider->clkr;
40 u32 div;
41
42 div = divider_get_val(rate, parent_rate, NULL, divider->width,
43 CLK_DIVIDER_ROUND_CLOSEST);
44
45 return regmap_update_bits(clkr->regmap, divider->reg,
46 (BIT(divider->width) - 1) << divider->shift,
47 div << divider->shift);
48}
49
50static unsigned long div_recalc_rate(struct clk_hw *hw,
51 unsigned long parent_rate)
52{
53 struct clk_regmap_div *divider = to_clk_regmap_div(hw);
54 struct clk_regmap *clkr = &divider->clkr;
55 u32 div;
56
57 regmap_read(clkr->regmap, divider->reg, &div);
58 div >>= divider->shift;
59 div &= BIT(divider->width) - 1;
60
61 return divider_recalc_rate(hw, parent_rate, div, NULL,
62 CLK_DIVIDER_ROUND_CLOSEST);
63}
64
65const struct clk_ops clk_regmap_div_ops = {
66 .round_rate = div_round_rate,
67 .set_rate = div_set_rate,
68 .recalc_rate = div_recalc_rate,
69};
70EXPORT_SYMBOL_GPL(clk_regmap_div_ops);
diff --git a/drivers/clk/qcom/clk-regmap-divider.h b/drivers/clk/qcom/clk-regmap-divider.h
new file mode 100644
index 000000000000..fc4492e3a827
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-divider.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef __QCOM_CLK_REGMAP_DIVIDER_H__
15#define __QCOM_CLK_REGMAP_DIVIDER_H__
16
17#include <linux/clk-provider.h>
18#include "clk-regmap.h"
19
20struct clk_regmap_div {
21 u32 reg;
22 u32 shift;
23 u32 width;
24 struct clk_regmap clkr;
25};
26
27extern const struct clk_ops clk_regmap_div_ops;
28
29#endif
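At the register level, shift and width carve a bit-field out of reg, and div_set_rate() masks exactly that field. For a 4-bit divider at bit 10, the geometry the ipq806x MI2S divider below uses, the update reduces to:

	/* width = 4, shift = 10 */
	u32 mask = (BIT(4) - 1) << 10;	/* 0x00003c00 */

	regmap_update_bits(regmap, 0x48, mask, div << 10);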
diff --git a/drivers/clk/qcom/clk-regmap-mux.c b/drivers/clk/qcom/clk-regmap-mux.c
new file mode 100644
index 000000000000..cae3071f384c
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-mux.c
@@ -0,0 +1,59 @@
1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/kernel.h>
15#include <linux/bitops.h>
16#include <linux/regmap.h>
17#include <linux/export.h>
18
19#include "clk-regmap-mux.h"
20
21static inline struct clk_regmap_mux *to_clk_regmap_mux(struct clk_hw *hw)
22{
23 return container_of(to_clk_regmap(hw), struct clk_regmap_mux, clkr);
24}
25
26static u8 mux_get_parent(struct clk_hw *hw)
27{
28 struct clk_regmap_mux *mux = to_clk_regmap_mux(hw);
29 struct clk_regmap *clkr = to_clk_regmap(hw);
30 unsigned int mask = GENMASK(mux->width - 1, 0);
31 unsigned int val;
32
33 regmap_read(clkr->regmap, mux->reg, &val);
34
35 val >>= mux->shift;
36 val &= mask;
37
38 return val;
39}
40
41static int mux_set_parent(struct clk_hw *hw, u8 index)
42{
43 struct clk_regmap_mux *mux = to_clk_regmap_mux(hw);
44 struct clk_regmap *clkr = to_clk_regmap(hw);
45 unsigned int mask = GENMASK(mux->width + mux->shift - 1, mux->shift);
46 unsigned int val;
47
48 val = index;
49 val <<= mux->shift;
50
51 return regmap_update_bits(clkr->regmap, mux->reg, mask, val);
52}
53
54const struct clk_ops clk_regmap_mux_closest_ops = {
55 .get_parent = mux_get_parent,
56 .set_parent = mux_set_parent,
57 .determine_rate = __clk_mux_determine_rate_closest,
58};
59EXPORT_SYMBOL_GPL(clk_regmap_mux_closest_ops);
diff --git a/drivers/clk/qcom/clk-regmap-mux.h b/drivers/clk/qcom/clk-regmap-mux.h
new file mode 100644
index 000000000000..5cec76154fda
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-mux.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef __QCOM_CLK_REGMAP_MUX_H__
15#define __QCOM_CLK_REGMAP_MUX_H__
16
17#include <linux/clk-provider.h>
18#include "clk-regmap.h"
19
20struct clk_regmap_mux {
21 u32 reg;
22 u32 shift;
23 u32 width;
24 struct clk_regmap clkr;
25};
26
27extern const struct clk_ops clk_regmap_mux_closest_ops;
28
29#endif
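Note the two different masks in the mux above: mux_get_parent() masks after shifting down, GENMASK(width - 1, 0), while mux_set_parent() masks in place, GENMASK(width + shift - 1, shift). For the single-bit muxes below (width = 1 at shift 14 or 10):

	/* mi2s_bit_clk: width = 1, shift = 14 */
	read_mask  = GENMASK(0, 0);	/* 0x1, applied after val >>= 14 */
	write_mask = GENMASK(14, 14);	/* BIT(14), applied in place */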
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index afed5eb0691e..cbdc31dea7f4 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -75,6 +75,17 @@ static struct clk_pll pll3 = {
 	},
 };
 
+static struct clk_regmap pll4_vote = {
+	.enable_reg = 0x34c0,
+	.enable_mask = BIT(4),
+	.hw.init = &(struct clk_init_data){
+		.name = "pll4_vote",
+		.parent_names = (const char *[]){ "pll4" },
+		.num_parents = 1,
+		.ops = &clk_pll_vote_ops,
+	},
+};
+
 static struct clk_pll pll8 = {
 	.l_reg = 0x3144,
 	.m_reg = 0x3148,
@@ -2163,6 +2174,7 @@ static struct clk_regmap *gcc_ipq806x_clks[] = {
 	[PLL0] = &pll0.clkr,
 	[PLL0_VOTE] = &pll0_vote,
 	[PLL3] = &pll3.clkr,
+	[PLL4_VOTE] = &pll4_vote,
 	[PLL8] = &pll8.clkr,
 	[PLL8_VOTE] = &pll8_vote,
 	[PLL14] = &pll14.clkr,
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
new file mode 100644
index 000000000000..121ffde25dc3
--- /dev/null
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -0,0 +1,473 @@
1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/kernel.h>
15#include <linux/bitops.h>
16#include <linux/err.h>
17#include <linux/platform_device.h>
18#include <linux/module.h>
19#include <linux/of.h>
20#include <linux/of_device.h>
21#include <linux/clk-provider.h>
22#include <linux/regmap.h>
23
24#include <dt-bindings/clock/qcom,lcc-ipq806x.h>
25
26#include "common.h"
27#include "clk-regmap.h"
28#include "clk-pll.h"
29#include "clk-rcg.h"
30#include "clk-branch.h"
31#include "clk-regmap-divider.h"
32#include "clk-regmap-mux.h"
33
34static struct clk_pll pll4 = {
35 .l_reg = 0x4,
36 .m_reg = 0x8,
37 .n_reg = 0xc,
38 .config_reg = 0x14,
39 .mode_reg = 0x0,
40 .status_reg = 0x18,
41 .status_bit = 16,
42 .clkr.hw.init = &(struct clk_init_data){
43 .name = "pll4",
44 .parent_names = (const char *[]){ "pxo" },
45 .num_parents = 1,
46 .ops = &clk_pll_ops,
47 },
48};
49
50static const struct pll_config pll4_config = {
51 .l = 0xf,
52 .m = 0x91,
53 .n = 0xc7,
54 .vco_val = 0x0,
55 .vco_mask = BIT(17) | BIT(16),
56 .pre_div_val = 0x0,
57 .pre_div_mask = BIT(19),
58 .post_div_val = 0x0,
59 .post_div_mask = BIT(21) | BIT(20),
60 .mn_ena_mask = BIT(22),
61 .main_output_mask = BIT(23),
62};
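Assuming the usual relation for these Qualcomm SR PLLs, rate = parent * (L + M/N), and a 25 MHz pxo (a board-level assumption, not stated in this file), this L/M/N choice puts PLL4 near the audio-friendly rate the tables below are built around:

	/* l = 0xf (15), m = 0x91 (145), n = 0xc7 (199) */
	/* 25 MHz * (15 + 145/199) ~= 393.216 MHz, the "393" rate that */
	/* the clk_tbl_aif_osr_393 table in lcc-msm8960.c is named for */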
63
64#define P_PXO 0
65#define P_PLL4 1
66
67static const u8 lcc_pxo_pll4_map[] = {
68 [P_PXO] = 0,
69 [P_PLL4] = 2,
70};
71
72static const char *lcc_pxo_pll4[] = {
73 "pxo",
74 "pll4_vote",
75};
76
77static struct freq_tbl clk_tbl_aif_mi2s[] = {
78 { 1024000, P_PLL4, 4, 1, 96 },
79 { 1411200, P_PLL4, 4, 2, 139 },
80 { 1536000, P_PLL4, 4, 1, 64 },
81 { 2048000, P_PLL4, 4, 1, 48 },
82 { 2116800, P_PLL4, 4, 2, 93 },
83 { 2304000, P_PLL4, 4, 2, 85 },
84 { 2822400, P_PLL4, 4, 6, 209 },
85 { 3072000, P_PLL4, 4, 1, 32 },
86 { 3175200, P_PLL4, 4, 1, 31 },
87 { 4096000, P_PLL4, 4, 1, 24 },
88 { 4233600, P_PLL4, 4, 9, 209 },
89 { 4608000, P_PLL4, 4, 3, 64 },
90 { 5644800, P_PLL4, 4, 12, 209 },
91 { 6144000, P_PLL4, 4, 1, 16 },
92 { 6350400, P_PLL4, 4, 2, 31 },
93 { 8192000, P_PLL4, 4, 1, 12 },
94 { 8467200, P_PLL4, 4, 18, 209 },
95 { 9216000, P_PLL4, 4, 3, 32 },
96 { 11289600, P_PLL4, 4, 24, 209 },
97 { 12288000, P_PLL4, 4, 1, 8 },
98 { 12700800, P_PLL4, 4, 27, 209 },
99 { 13824000, P_PLL4, 4, 9, 64 },
100 { 16384000, P_PLL4, 4, 1, 6 },
101 { 16934400, P_PLL4, 4, 41, 238 },
102 { 18432000, P_PLL4, 4, 3, 16 },
103 { 22579200, P_PLL4, 2, 24, 209 },
104 { 24576000, P_PLL4, 4, 1, 4 },
105 { 27648000, P_PLL4, 4, 9, 32 },
106 { 33868800, P_PLL4, 4, 41, 119 },
107 { 36864000, P_PLL4, 4, 3, 8 },
108 { 45158400, P_PLL4, 1, 24, 209 },
109 { 49152000, P_PLL4, 4, 1, 2 },
110 { 50803200, P_PLL4, 1, 27, 209 },
111 { }
112};
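Each row above is {rate, source, pre-divider, M, N}, and the generated rate is (parent / pre_div) * M / N. Taking PLL4 at roughly 393.216 MHz (see the earlier note on pll4_config), the first rows check out exactly:

	/* 393216000 / 4 = 98304000                        */
	/* 98304000 * 1 / 96 = 1024000  (first table row)  */
	/* 98304000 * 1 / 64 = 1536000, * 1 / 48 = 2048000 */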
113
114static struct clk_rcg mi2s_osr_src = {
115 .ns_reg = 0x48,
116 .md_reg = 0x4c,
117 .mn = {
118 .mnctr_en_bit = 8,
119 .mnctr_reset_bit = 7,
120 .mnctr_mode_shift = 5,
121 .n_val_shift = 24,
122 .m_val_shift = 8,
123 .width = 8,
124 },
125 .p = {
126 .pre_div_shift = 3,
127 .pre_div_width = 2,
128 },
129 .s = {
130 .src_sel_shift = 0,
131 .parent_map = lcc_pxo_pll4_map,
132 },
133 .freq_tbl = clk_tbl_aif_mi2s,
134 .clkr = {
135 .enable_reg = 0x48,
136 .enable_mask = BIT(9),
137 .hw.init = &(struct clk_init_data){
138 .name = "mi2s_osr_src",
139 .parent_names = lcc_pxo_pll4,
140 .num_parents = 2,
141 .ops = &clk_rcg_ops,
142 .flags = CLK_SET_RATE_GATE,
143 },
144 },
145};
146
147static const char *lcc_mi2s_parents[] = {
148 "mi2s_osr_src",
149};
150
151static struct clk_branch mi2s_osr_clk = {
152 .halt_reg = 0x50,
153 .halt_bit = 1,
154 .halt_check = BRANCH_HALT_ENABLE,
155 .clkr = {
156 .enable_reg = 0x48,
157 .enable_mask = BIT(17),
158 .hw.init = &(struct clk_init_data){
159 .name = "mi2s_osr_clk",
160 .parent_names = lcc_mi2s_parents,
161 .num_parents = 1,
162 .ops = &clk_branch_ops,
163 .flags = CLK_SET_RATE_PARENT,
164 },
165 },
166};
167
168static struct clk_regmap_div mi2s_div_clk = {
169 .reg = 0x48,
170 .shift = 10,
171 .width = 4,
172 .clkr = {
173 .hw.init = &(struct clk_init_data){
174 .name = "mi2s_div_clk",
175 .parent_names = lcc_mi2s_parents,
176 .num_parents = 1,
177 .ops = &clk_regmap_div_ops,
178 },
179 },
180};
181
182static struct clk_branch mi2s_bit_div_clk = {
183 .halt_reg = 0x50,
184 .halt_bit = 0,
185 .halt_check = BRANCH_HALT_ENABLE,
186 .clkr = {
187 .enable_reg = 0x48,
188 .enable_mask = BIT(15),
189 .hw.init = &(struct clk_init_data){
190 .name = "mi2s_bit_div_clk",
191 .parent_names = (const char *[]){ "mi2s_div_clk" },
192 .num_parents = 1,
193 .ops = &clk_branch_ops,
194 .flags = CLK_SET_RATE_PARENT,
195 },
196 },
197};
198
199
200static struct clk_regmap_mux mi2s_bit_clk = {
201 .reg = 0x48,
202 .shift = 14,
203 .width = 1,
204 .clkr = {
205 .hw.init = &(struct clk_init_data){
206 .name = "mi2s_bit_clk",
207 .parent_names = (const char *[]){
208 "mi2s_bit_div_clk",
209 "mi2s_codec_clk",
210 },
211 .num_parents = 2,
212 .ops = &clk_regmap_mux_closest_ops,
213 .flags = CLK_SET_RATE_PARENT,
214 },
215 },
216};
217
218static struct freq_tbl clk_tbl_pcm[] = {
219 { 64000, P_PLL4, 4, 1, 1536 },
220 { 128000, P_PLL4, 4, 1, 768 },
221 { 256000, P_PLL4, 4, 1, 384 },
222 { 512000, P_PLL4, 4, 1, 192 },
223 { 1024000, P_PLL4, 4, 1, 96 },
224 { 2048000, P_PLL4, 4, 1, 48 },
225 { },
226};
227
228static struct clk_rcg pcm_src = {
229 .ns_reg = 0x54,
230 .md_reg = 0x58,
231 .mn = {
232 .mnctr_en_bit = 8,
233 .mnctr_reset_bit = 7,
234 .mnctr_mode_shift = 5,
235 .n_val_shift = 16,
236 .m_val_shift = 16,
237 .width = 16,
238 },
239 .p = {
240 .pre_div_shift = 3,
241 .pre_div_width = 2,
242 },
243 .s = {
244 .src_sel_shift = 0,
245 .parent_map = lcc_pxo_pll4_map,
246 },
247 .freq_tbl = clk_tbl_pcm,
248 .clkr = {
249 .enable_reg = 0x54,
250 .enable_mask = BIT(9),
251 .hw.init = &(struct clk_init_data){
252 .name = "pcm_src",
253 .parent_names = lcc_pxo_pll4,
254 .num_parents = 2,
255 .ops = &clk_rcg_ops,
256 .flags = CLK_SET_RATE_GATE,
257 },
258 },
259};
260
261static struct clk_branch pcm_clk_out = {
262 .halt_reg = 0x5c,
263 .halt_bit = 0,
264 .halt_check = BRANCH_HALT_ENABLE,
265 .clkr = {
266 .enable_reg = 0x54,
267 .enable_mask = BIT(11),
268 .hw.init = &(struct clk_init_data){
269 .name = "pcm_clk_out",
270 .parent_names = (const char *[]){ "pcm_src" },
271 .num_parents = 1,
272 .ops = &clk_branch_ops,
273 .flags = CLK_SET_RATE_PARENT,
274 },
275 },
276};
277
278static struct clk_regmap_mux pcm_clk = {
279 .reg = 0x54,
280 .shift = 10,
281 .width = 1,
282 .clkr = {
283 .hw.init = &(struct clk_init_data){
284 .name = "pcm_clk",
285 .parent_names = (const char *[]){
286 "pcm_clk_out",
287 "pcm_codec_clk",
288 },
289 .num_parents = 2,
290 .ops = &clk_regmap_mux_closest_ops,
291 .flags = CLK_SET_RATE_PARENT,
292 },
293 },
294};
295
296static struct freq_tbl clk_tbl_aif_osr[] = {
297 { 22050, P_PLL4, 1, 147, 20480 },
298 { 32000, P_PLL4, 1, 1, 96 },
299 { 44100, P_PLL4, 1, 147, 10240 },
300 { 48000, P_PLL4, 1, 1, 64 },
301 { 88200, P_PLL4, 1, 147, 5120 },
302 { 96000, P_PLL4, 1, 1, 32 },
303 { 176400, P_PLL4, 1, 147, 2560 },
304 { 192000, P_PLL4, 1, 1, 16 },
305 { },
306};
307
308static struct clk_rcg spdif_src = {
309 .ns_reg = 0xcc,
310 .md_reg = 0xd0,
311 .mn = {
312 .mnctr_en_bit = 8,
313 .mnctr_reset_bit = 7,
314 .mnctr_mode_shift = 5,
315 .n_val_shift = 16,
316 .m_val_shift = 16,
317 .width = 8,
318 },
319 .p = {
320 .pre_div_shift = 3,
321 .pre_div_width = 2,
322 },
323 .s = {
324 .src_sel_shift = 0,
325 .parent_map = lcc_pxo_pll4_map,
326 },
327 .freq_tbl = clk_tbl_aif_osr,
328 .clkr = {
329 .enable_reg = 0xcc,
330 .enable_mask = BIT(9),
331 .hw.init = &(struct clk_init_data){
332 .name = "spdif_src",
333 .parent_names = lcc_pxo_pll4,
334 .num_parents = 2,
335 .ops = &clk_rcg_ops,
336 .flags = CLK_SET_RATE_GATE,
337 },
338 },
339};
340
341static const char *lcc_spdif_parents[] = {
342 "spdif_src",
343};
344
345static struct clk_branch spdif_clk = {
346 .halt_reg = 0xd4,
347 .halt_bit = 1,
348 .halt_check = BRANCH_HALT_ENABLE,
349 .clkr = {
350 .enable_reg = 0xcc,
351 .enable_mask = BIT(12),
352 .hw.init = &(struct clk_init_data){
353 .name = "spdif_clk",
354 .parent_names = lcc_spdif_parents,
355 .num_parents = 1,
356 .ops = &clk_branch_ops,
357 .flags = CLK_SET_RATE_PARENT,
358 },
359 },
360};
361
362static struct freq_tbl clk_tbl_ahbix[] = {
363 { 131072, P_PLL4, 1, 1, 3 },
364 { },
365};
366
367static struct clk_rcg ahbix_clk = {
368 .ns_reg = 0x38,
369 .md_reg = 0x3c,
370 .mn = {
371 .mnctr_en_bit = 8,
372 .mnctr_reset_bit = 7,
373 .mnctr_mode_shift = 5,
374 .n_val_shift = 24,
375 .m_val_shift = 8,
376 .width = 8,
377 },
378 .p = {
379 .pre_div_shift = 3,
380 .pre_div_width = 2,
381 },
382 .s = {
383 .src_sel_shift = 0,
384 .parent_map = lcc_pxo_pll4_map,
385 },
386 .freq_tbl = clk_tbl_ahbix,
387 .clkr = {
388 .enable_reg = 0x38,
389 .enable_mask = BIT(10), /* toggle the gfmux to select mn/pxo */
390 .hw.init = &(struct clk_init_data){
391 .name = "ahbix",
392 .parent_names = lcc_pxo_pll4,
393 .num_parents = 2,
394 .ops = &clk_rcg_ops,
395 .flags = CLK_SET_RATE_GATE,
396 },
397 },
398};
399
400static struct clk_regmap *lcc_ipq806x_clks[] = {
401 [PLL4] = &pll4.clkr,
402 [MI2S_OSR_SRC] = &mi2s_osr_src.clkr,
403 [MI2S_OSR_CLK] = &mi2s_osr_clk.clkr,
404 [MI2S_DIV_CLK] = &mi2s_div_clk.clkr,
405 [MI2S_BIT_DIV_CLK] = &mi2s_bit_div_clk.clkr,
406 [MI2S_BIT_CLK] = &mi2s_bit_clk.clkr,
407 [PCM_SRC] = &pcm_src.clkr,
408 [PCM_CLK_OUT] = &pcm_clk_out.clkr,
409 [PCM_CLK] = &pcm_clk.clkr,
410 [SPDIF_SRC] = &spdif_src.clkr,
411 [SPDIF_CLK] = &spdif_clk.clkr,
412 [AHBIX_CLK] = &ahbix_clk.clkr,
413};
414
415static const struct regmap_config lcc_ipq806x_regmap_config = {
416 .reg_bits = 32,
417 .reg_stride = 4,
418 .val_bits = 32,
419 .max_register = 0xfc,
420 .fast_io = true,
421};
422
423static const struct qcom_cc_desc lcc_ipq806x_desc = {
424 .config = &lcc_ipq806x_regmap_config,
425 .clks = lcc_ipq806x_clks,
426 .num_clks = ARRAY_SIZE(lcc_ipq806x_clks),
427};
428
429static const struct of_device_id lcc_ipq806x_match_table[] = {
430 { .compatible = "qcom,lcc-ipq8064" },
431 { }
432};
433MODULE_DEVICE_TABLE(of, lcc_ipq806x_match_table);
434
435static int lcc_ipq806x_probe(struct platform_device *pdev)
436{
437 u32 val;
438 struct regmap *regmap;
439
440 regmap = qcom_cc_map(pdev, &lcc_ipq806x_desc);
441 if (IS_ERR(regmap))
442 return PTR_ERR(regmap);
443
444 /* Configure the rate of PLL4 if the bootloader hasn't already */
445 regmap_read(regmap, 0x0, &val);
446 if (!val)
447 clk_pll_configure_sr(&pll4, regmap, &pll4_config, true);
448 /* Enable PLL4 source on the LPASS Primary PLL Mux */
449 regmap_write(regmap, 0xc4, 0x1);
450
451 return qcom_cc_really_probe(pdev, &lcc_ipq806x_desc, regmap);
452}
453
454static int lcc_ipq806x_remove(struct platform_device *pdev)
455{
456 qcom_cc_remove(pdev);
457 return 0;
458}
459
460static struct platform_driver lcc_ipq806x_driver = {
461 .probe = lcc_ipq806x_probe,
462 .remove = lcc_ipq806x_remove,
463 .driver = {
464 .name = "lcc-ipq806x",
465 .owner = THIS_MODULE,
466 .of_match_table = lcc_ipq806x_match_table,
467 },
468};
469module_platform_driver(lcc_ipq806x_driver);
470
471MODULE_DESCRIPTION("QCOM LCC IPQ806x Driver");
472MODULE_LICENSE("GPL v2");
473MODULE_ALIAS("platform:lcc-ipq806x");
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
new file mode 100644
index 000000000000..a75a408cfccd
--- /dev/null
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -0,0 +1,585 @@
1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/kernel.h>
15#include <linux/bitops.h>
16#include <linux/err.h>
17#include <linux/platform_device.h>
18#include <linux/module.h>
19#include <linux/of.h>
20#include <linux/of_device.h>
21#include <linux/clk-provider.h>
22#include <linux/regmap.h>
23
24#include <dt-bindings/clock/qcom,lcc-msm8960.h>
25
26#include "common.h"
27#include "clk-regmap.h"
28#include "clk-pll.h"
29#include "clk-rcg.h"
30#include "clk-branch.h"
31#include "clk-regmap-divider.h"
32#include "clk-regmap-mux.h"
33
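/*
 * PLL4 is the LPASS (low-power audio subsystem) PLL; it runs off the
 * 27 MHz PXO crystal and sources all of the audio interface clocks below.
 */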
34static struct clk_pll pll4 = {
35 .l_reg = 0x4,
36 .m_reg = 0x8,
37 .n_reg = 0xc,
38 .config_reg = 0x14,
39 .mode_reg = 0x0,
40 .status_reg = 0x18,
41 .status_bit = 16,
42 .clkr.hw.init = &(struct clk_init_data){
43 .name = "pll4",
44 .parent_names = (const char *[]){ "pxo" },
45 .num_parents = 1,
46 .ops = &clk_pll_ops,
47 },
48};
49
50#define P_PXO 0
51#define P_PLL4 1
52
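/* Hardware source-select encoding: mux field value 0 selects PXO, 2 selects PLL4. */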
53static const u8 lcc_pxo_pll4_map[] = {
54 [P_PXO] = 0,
55 [P_PLL4] = 2,
56};
57
58static const char *lcc_pxo_pll4[] = {
59 "pxo",
60 "pll4_vote",
61};
62
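/*
 * AIF OSR rate plans: { rate (Hz), source, pre-divider, M, N }. The _492
 * table assumes PLL4 at 491.52 MHz, the _393 table at 393.216 MHz; probe()
 * picks between them after reading PLL4's L register.
 */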
63static struct freq_tbl clk_tbl_aif_osr_492[] = {
64 { 512000, P_PLL4, 4, 1, 240 },
65 { 768000, P_PLL4, 4, 1, 160 },
66 { 1024000, P_PLL4, 4, 1, 120 },
67 { 1536000, P_PLL4, 4, 1, 80 },
68 { 2048000, P_PLL4, 4, 1, 60 },
69 { 3072000, P_PLL4, 4, 1, 40 },
70 { 4096000, P_PLL4, 4, 1, 30 },
71 { 6144000, P_PLL4, 4, 1, 20 },
72 { 8192000, P_PLL4, 4, 1, 15 },
73 { 12288000, P_PLL4, 4, 1, 10 },
74 { 24576000, P_PLL4, 4, 1, 5 },
75 { 27000000, P_PXO, 1, 0, 0 },
76 { }
77};
78
79static struct freq_tbl clk_tbl_aif_osr_393[] = {
80 { 512000, P_PLL4, 4, 1, 192 },
81 { 768000, P_PLL4, 4, 1, 128 },
82 { 1024000, P_PLL4, 4, 1, 96 },
83 { 1536000, P_PLL4, 4, 1, 64 },
84 { 2048000, P_PLL4, 4, 1, 48 },
85 { 3072000, P_PLL4, 4, 1, 32 },
86 { 4096000, P_PLL4, 4, 1, 24 },
87 { 6144000, P_PLL4, 4, 1, 16 },
88 { 8192000, P_PLL4, 4, 1, 12 },
89 { 12288000, P_PLL4, 4, 1, 8 },
90 { 24576000, P_PLL4, 4, 1, 4 },
91 { 27000000, P_PXO, 1, 0, 0 },
92 { }
93};
94
95static struct clk_rcg mi2s_osr_src = {
96 .ns_reg = 0x48,
97 .md_reg = 0x4c,
98 .mn = {
99 .mnctr_en_bit = 8,
100 .mnctr_reset_bit = 7,
101 .mnctr_mode_shift = 5,
102 .n_val_shift = 24,
103 .m_val_shift = 8,
104 .width = 8,
105 },
106 .p = {
107 .pre_div_shift = 3,
108 .pre_div_width = 2,
109 },
110 .s = {
111 .src_sel_shift = 0,
112 .parent_map = lcc_pxo_pll4_map,
113 },
114 .freq_tbl = clk_tbl_aif_osr_393,
115 .clkr = {
116 .enable_reg = 0x48,
117 .enable_mask = BIT(9),
118 .hw.init = &(struct clk_init_data){
119 .name = "mi2s_osr_src",
120 .parent_names = lcc_pxo_pll4,
121 .num_parents = 2,
122 .ops = &clk_rcg_ops,
123 .flags = CLK_SET_RATE_GATE,
124 },
125 },
126};
127
128static const char *lcc_mi2s_parents[] = {
129 "mi2s_osr_src",
130};
131
132static struct clk_branch mi2s_osr_clk = {
133 .halt_reg = 0x50,
134 .halt_bit = 1,
135 .halt_check = BRANCH_HALT_ENABLE,
136 .clkr = {
137 .enable_reg = 0x48,
138 .enable_mask = BIT(17),
139 .hw.init = &(struct clk_init_data){
140 .name = "mi2s_osr_clk",
141 .parent_names = lcc_mi2s_parents,
142 .num_parents = 1,
143 .ops = &clk_branch_ops,
144 .flags = CLK_SET_RATE_PARENT,
145 },
146 },
147};
148
149static struct clk_regmap_div mi2s_div_clk = {
150 .reg = 0x48,
151 .shift = 10,
152 .width = 4,
153 .clkr = {
154 .enable_reg = 0x48,
155 .enable_mask = BIT(15),
156 .hw.init = &(struct clk_init_data){
157 .name = "mi2s_div_clk",
158 .parent_names = lcc_mi2s_parents,
159 .num_parents = 1,
160 .ops = &clk_regmap_div_ops,
161 },
162 },
163};
164
165static struct clk_branch mi2s_bit_div_clk = {
166 .halt_reg = 0x50,
167 .halt_bit = 0,
168 .halt_check = BRANCH_HALT_ENABLE,
169 .clkr = {
170 .enable_reg = 0x48,
171 .enable_mask = BIT(15),
172 .hw.init = &(struct clk_init_data){
173 .name = "mi2s_bit_div_clk",
174 .parent_names = (const char *[]){ "mi2s_div_clk" },
175 .num_parents = 1,
176 .ops = &clk_branch_ops,
177 .flags = CLK_SET_RATE_PARENT,
178 },
179 },
180};
181
182static struct clk_regmap_mux mi2s_bit_clk = {
183 .reg = 0x48,
184 .shift = 14,
185 .width = 1,
186 .clkr = {
187 .hw.init = &(struct clk_init_data){
188 .name = "mi2s_bit_clk",
189 .parent_names = (const char *[]){
190 "mi2s_bit_div_clk",
191 "mi2s_codec_clk",
192 },
193 .num_parents = 2,
194 .ops = &clk_regmap_mux_closest_ops,
195 .flags = CLK_SET_RATE_PARENT,
196 },
197 },
198};
199
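/*
 * CLK_AIF_OSR_DIV() stamps out the five-clock chain used by each audio
 * interface: an OSR RCG, its branch gate, a free-running divider, the
 * bit-clock branch gate, and a mux that picks between the internal bit
 * divider and the external codec clock.
 */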
200#define CLK_AIF_OSR_DIV(prefix, _ns, _md, hr) \
201static struct clk_rcg prefix##_osr_src = { \
202 .ns_reg = _ns, \
203 .md_reg = _md, \
204 .mn = { \
205 .mnctr_en_bit = 8, \
206 .mnctr_reset_bit = 7, \
207 .mnctr_mode_shift = 5, \
208 .n_val_shift = 24, \
209 .m_val_shift = 8, \
210 .width = 8, \
211 }, \
212 .p = { \
213 .pre_div_shift = 3, \
214 .pre_div_width = 2, \
215 }, \
216 .s = { \
217 .src_sel_shift = 0, \
218 .parent_map = lcc_pxo_pll4_map, \
219 }, \
220 .freq_tbl = clk_tbl_aif_osr_393, \
221 .clkr = { \
222 .enable_reg = _ns, \
223 .enable_mask = BIT(9), \
224 .hw.init = &(struct clk_init_data){ \
225 .name = #prefix "_osr_src", \
226 .parent_names = lcc_pxo_pll4, \
227 .num_parents = 2, \
228 .ops = &clk_rcg_ops, \
229 .flags = CLK_SET_RATE_GATE, \
230 }, \
231 }, \
232}; \
233 \
234static const char *lcc_##prefix##_parents[] = { \
235 #prefix "_osr_src", \
236}; \
237 \
238static struct clk_branch prefix##_osr_clk = { \
239 .halt_reg = hr, \
240 .halt_bit = 1, \
241 .halt_check = BRANCH_HALT_ENABLE, \
242 .clkr = { \
243 .enable_reg = _ns, \
244 .enable_mask = BIT(21), \
245 .hw.init = &(struct clk_init_data){ \
246 .name = #prefix "_osr_clk", \
247 .parent_names = lcc_##prefix##_parents, \
248 .num_parents = 1, \
249 .ops = &clk_branch_ops, \
250 .flags = CLK_SET_RATE_PARENT, \
251 }, \
252 }, \
253}; \
254 \
255static struct clk_regmap_div prefix##_div_clk = { \
256 .reg = _ns, \
257 .shift = 10, \
258 .width = 8, \
259 .clkr = { \
260 .hw.init = &(struct clk_init_data){ \
261 .name = #prefix "_div_clk", \
262 .parent_names = lcc_##prefix##_parents, \
263 .num_parents = 1, \
264 .ops = &clk_regmap_div_ops, \
265 }, \
266 }, \
267}; \
268 \
269static struct clk_branch prefix##_bit_div_clk = { \
270 .halt_reg = hr, \
271 .halt_bit = 0, \
272 .halt_check = BRANCH_HALT_ENABLE, \
273 .clkr = { \
274 .enable_reg = _ns, \
275 .enable_mask = BIT(19), \
276 .hw.init = &(struct clk_init_data){ \
277 .name = #prefix "_bit_div_clk", \
278 .parent_names = (const char *[]){ \
279 #prefix "_div_clk" \
280 }, \
281 .num_parents = 1, \
282 .ops = &clk_branch_ops, \
283 .flags = CLK_SET_RATE_PARENT, \
284 }, \
285 }, \
286}; \
287 \
288static struct clk_regmap_mux prefix##_bit_clk = { \
289 .reg = _ns, \
290 .shift = 18, \
291 .width = 1, \
292 .clkr = { \
293 .hw.init = &(struct clk_init_data){ \
294 .name = #prefix "_bit_clk", \
295 .parent_names = (const char *[]){ \
296 #prefix "_bit_div_clk", \
297 #prefix "_codec_clk", \
298 }, \
299 .num_parents = 2, \
300 .ops = &clk_regmap_mux_closest_ops, \
301 .flags = CLK_SET_RATE_PARENT, \
302 }, \
303 }, \
304}
305
306CLK_AIF_OSR_DIV(codec_i2s_mic, 0x60, 0x64, 0x68);
307CLK_AIF_OSR_DIV(spare_i2s_mic, 0x78, 0x7c, 0x80);
308CLK_AIF_OSR_DIV(codec_i2s_spkr, 0x6c, 0x70, 0x74);
309CLK_AIF_OSR_DIV(spare_i2s_spkr, 0x84, 0x88, 0x8c);
310
311static struct freq_tbl clk_tbl_pcm_492[] = {
312 { 256000, P_PLL4, 4, 1, 480 },
313 { 512000, P_PLL4, 4, 1, 240 },
314 { 768000, P_PLL4, 4, 1, 160 },
315 { 1024000, P_PLL4, 4, 1, 120 },
316 { 1536000, P_PLL4, 4, 1, 80 },
317 { 2048000, P_PLL4, 4, 1, 60 },
318 { 3072000, P_PLL4, 4, 1, 40 },
319 { 4096000, P_PLL4, 4, 1, 30 },
320 { 6144000, P_PLL4, 4, 1, 20 },
321 { 8192000, P_PLL4, 4, 1, 15 },
322 { 12288000, P_PLL4, 4, 1, 10 },
323 { 24576000, P_PLL4, 4, 1, 5 },
324 { 27000000, P_PXO, 1, 0, 0 },
325 { }
326};
327
328static struct freq_tbl clk_tbl_pcm_393[] = {
329 { 256000, P_PLL4, 4, 1, 384 },
330 { 512000, P_PLL4, 4, 1, 192 },
331 { 768000, P_PLL4, 4, 1, 128 },
332 { 1024000, P_PLL4, 4, 1, 96 },
333 { 1536000, P_PLL4, 4, 1, 64 },
334 { 2048000, P_PLL4, 4, 1, 48 },
335 { 3072000, P_PLL4, 4, 1, 32 },
336 { 4096000, P_PLL4, 4, 1, 24 },
337 { 6144000, P_PLL4, 4, 1, 16 },
338 { 8192000, P_PLL4, 4, 1, 12 },
339 { 12288000, P_PLL4, 4, 1, 8 },
340 { 24576000, P_PLL4, 4, 1, 4 },
341 { 27000000, P_PXO, 1, 0, 0 },
342 { }
343};
344
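/*
 * The PCM RCG uses a 16-bit M/N counter (the AIF sources use 8 bits) so
 * that N values up to 480 can reach the low 256 kHz rate.
 */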
345static struct clk_rcg pcm_src = {
346 .ns_reg = 0x54,
347 .md_reg = 0x58,
348 .mn = {
349 .mnctr_en_bit = 8,
350 .mnctr_reset_bit = 7,
351 .mnctr_mode_shift = 5,
352 .n_val_shift = 16,
353 .m_val_shift = 16,
354 .width = 16,
355 },
356 .p = {
357 .pre_div_shift = 3,
358 .pre_div_width = 2,
359 },
360 .s = {
361 .src_sel_shift = 0,
362 .parent_map = lcc_pxo_pll4_map,
363 },
364 .freq_tbl = clk_tbl_pcm_393,
365 .clkr = {
366 .enable_reg = 0x54,
367 .enable_mask = BIT(9),
368 .hw.init = &(struct clk_init_data){
369 .name = "pcm_src",
370 .parent_names = lcc_pxo_pll4,
371 .num_parents = 2,
372 .ops = &clk_rcg_ops,
373 .flags = CLK_SET_RATE_GATE,
374 },
375 },
376};
377
378static struct clk_branch pcm_clk_out = {
379 .halt_reg = 0x5c,
380 .halt_bit = 0,
381 .halt_check = BRANCH_HALT_ENABLE,
382 .clkr = {
383 .enable_reg = 0x54,
384 .enable_mask = BIT(11),
385 .hw.init = &(struct clk_init_data){
386 .name = "pcm_clk_out",
387 .parent_names = (const char *[]){ "pcm_src" },
388 .num_parents = 1,
389 .ops = &clk_branch_ops,
390 .flags = CLK_SET_RATE_PARENT,
391 },
392 },
393};
394
395static struct clk_regmap_mux pcm_clk = {
396 .reg = 0x54,
397 .shift = 10,
398 .width = 1,
399 .clkr = {
400 .hw.init = &(struct clk_init_data){
401 .name = "pcm_clk",
402 .parent_names = (const char *[]){
403 "pcm_clk_out",
404 "pcm_codec_clk",
405 },
406 .num_parents = 2,
407 .ops = &clk_regmap_mux_closest_ops,
408 .flags = CLK_SET_RATE_PARENT,
409 },
410 },
411};
412
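/*
 * The SLIMbus source shares the AIF OSR rate plan; probe() swaps in the
 * 491.52 MHz table when PLL4 is detected at that rate.
 */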
413static struct clk_rcg slimbus_src = {
414 .ns_reg = 0xcc,
415 .md_reg = 0xd0,
416 .mn = {
417 .mnctr_en_bit = 8,
418 .mnctr_reset_bit = 7,
419 .mnctr_mode_shift = 5,
420 .n_val_shift = 16,
421 .m_val_shift = 16,
422 .width = 8,
423 },
424 .p = {
425 .pre_div_shift = 3,
426 .pre_div_width = 2,
427 },
428 .s = {
429 .src_sel_shift = 0,
430 .parent_map = lcc_pxo_pll4_map,
431 },
432 .freq_tbl = clk_tbl_aif_osr_393,
433 .clkr = {
434 .enable_reg = 0xcc,
435 .enable_mask = BIT(9),
436 .hw.init = &(struct clk_init_data){
437 .name = "slimbus_src",
438 .parent_names = lcc_pxo_pll4,
439 .num_parents = 2,
440 .ops = &clk_rcg_ops,
441 .flags = CLK_SET_RATE_GATE,
442 },
443 },
444};
445
446static const char *lcc_slimbus_parents[] = {
447 "slimbus_src",
448};
449
450static struct clk_branch audio_slimbus_clk = {
451 .halt_reg = 0xd4,
452 .halt_bit = 0,
453 .halt_check = BRANCH_HALT_ENABLE,
454 .clkr = {
455 .enable_reg = 0xcc,
456 .enable_mask = BIT(10),
457 .hw.init = &(struct clk_init_data){
458 .name = "audio_slimbus_clk",
459 .parent_names = lcc_slimbus_parents,
460 .num_parents = 1,
461 .ops = &clk_branch_ops,
462 .flags = CLK_SET_RATE_PARENT,
463 },
464 },
465};
466
467static struct clk_branch sps_slimbus_clk = {
468 .halt_reg = 0xd4,
469 .halt_bit = 1,
470 .halt_check = BRANCH_HALT_ENABLE,
471 .clkr = {
472 .enable_reg = 0xcc,
473 .enable_mask = BIT(12),
474 .hw.init = &(struct clk_init_data){
475 .name = "sps_slimbus_clk",
476 .parent_names = lcc_slimbus_parents,
477 .num_parents = 1,
478 .ops = &clk_branch_ops,
479 .flags = CLK_SET_RATE_PARENT,
480 },
481 },
482};
483
484static struct clk_regmap *lcc_msm8960_clks[] = {
485 [PLL4] = &pll4.clkr,
486 [MI2S_OSR_SRC] = &mi2s_osr_src.clkr,
487 [MI2S_OSR_CLK] = &mi2s_osr_clk.clkr,
488 [MI2S_DIV_CLK] = &mi2s_div_clk.clkr,
489 [MI2S_BIT_DIV_CLK] = &mi2s_bit_div_clk.clkr,
490 [MI2S_BIT_CLK] = &mi2s_bit_clk.clkr,
491 [PCM_SRC] = &pcm_src.clkr,
492 [PCM_CLK_OUT] = &pcm_clk_out.clkr,
493 [PCM_CLK] = &pcm_clk.clkr,
494 [SLIMBUS_SRC] = &slimbus_src.clkr,
495 [AUDIO_SLIMBUS_CLK] = &audio_slimbus_clk.clkr,
496 [SPS_SLIMBUS_CLK] = &sps_slimbus_clk.clkr,
497 [CODEC_I2S_MIC_OSR_SRC] = &codec_i2s_mic_osr_src.clkr,
498 [CODEC_I2S_MIC_OSR_CLK] = &codec_i2s_mic_osr_clk.clkr,
499 [CODEC_I2S_MIC_DIV_CLK] = &codec_i2s_mic_div_clk.clkr,
500 [CODEC_I2S_MIC_BIT_DIV_CLK] = &codec_i2s_mic_bit_div_clk.clkr,
501 [CODEC_I2S_MIC_BIT_CLK] = &codec_i2s_mic_bit_clk.clkr,
502 [SPARE_I2S_MIC_OSR_SRC] = &spare_i2s_mic_osr_src.clkr,
503 [SPARE_I2S_MIC_OSR_CLK] = &spare_i2s_mic_osr_clk.clkr,
504 [SPARE_I2S_MIC_DIV_CLK] = &spare_i2s_mic_div_clk.clkr,
505 [SPARE_I2S_MIC_BIT_DIV_CLK] = &spare_i2s_mic_bit_div_clk.clkr,
506 [SPARE_I2S_MIC_BIT_CLK] = &spare_i2s_mic_bit_clk.clkr,
507 [CODEC_I2S_SPKR_OSR_SRC] = &codec_i2s_spkr_osr_src.clkr,
508 [CODEC_I2S_SPKR_OSR_CLK] = &codec_i2s_spkr_osr_clk.clkr,
509 [CODEC_I2S_SPKR_DIV_CLK] = &codec_i2s_spkr_div_clk.clkr,
510 [CODEC_I2S_SPKR_BIT_DIV_CLK] = &codec_i2s_spkr_bit_div_clk.clkr,
511 [CODEC_I2S_SPKR_BIT_CLK] = &codec_i2s_spkr_bit_clk.clkr,
512 [SPARE_I2S_SPKR_OSR_SRC] = &spare_i2s_spkr_osr_src.clkr,
513 [SPARE_I2S_SPKR_OSR_CLK] = &spare_i2s_spkr_osr_clk.clkr,
514 [SPARE_I2S_SPKR_DIV_CLK] = &spare_i2s_spkr_div_clk.clkr,
515 [SPARE_I2S_SPKR_BIT_DIV_CLK] = &spare_i2s_spkr_bit_div_clk.clkr,
516 [SPARE_I2S_SPKR_BIT_CLK] = &spare_i2s_spkr_bit_clk.clkr,
517};
518
519static const struct regmap_config lcc_msm8960_regmap_config = {
520 .reg_bits = 32,
521 .reg_stride = 4,
522 .val_bits = 32,
523 .max_register = 0xfc,
524 .fast_io = true,
525};
526
527static const struct qcom_cc_desc lcc_msm8960_desc = {
528 .config = &lcc_msm8960_regmap_config,
529 .clks = lcc_msm8960_clks,
530 .num_clks = ARRAY_SIZE(lcc_msm8960_clks),
531};
532
533static const struct of_device_id lcc_msm8960_match_table[] = {
534 { .compatible = "qcom,lcc-msm8960" },
535 { .compatible = "qcom,lcc-apq8064" },
536 { }
537};
538MODULE_DEVICE_TABLE(of, lcc_msm8960_match_table);
539
540static int lcc_msm8960_probe(struct platform_device *pdev)
541{
542 u32 val;
543 struct regmap *regmap;
544
545 regmap = qcom_cc_map(pdev, &lcc_msm8960_desc);
546 if (IS_ERR(regmap))
547 return PTR_ERR(regmap);
548
549 /* Use the correct frequency plan depending on speed of PLL4 */
550	regmap_read(regmap, 0x4, &val);
551 if (val == 0x12) {
552 slimbus_src.freq_tbl = clk_tbl_aif_osr_492;
553 mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492;
554 codec_i2s_mic_osr_src.freq_tbl = clk_tbl_aif_osr_492;
555 spare_i2s_mic_osr_src.freq_tbl = clk_tbl_aif_osr_492;
556 codec_i2s_spkr_osr_src.freq_tbl = clk_tbl_aif_osr_492;
557 spare_i2s_spkr_osr_src.freq_tbl = clk_tbl_aif_osr_492;
558 pcm_src.freq_tbl = clk_tbl_pcm_492;
559 }
560 /* Enable PLL4 source on the LPASS Primary PLL Mux */
561 regmap_write(regmap, 0xc4, 0x1);
562
563 return qcom_cc_really_probe(pdev, &lcc_msm8960_desc, regmap);
564}
565
566static int lcc_msm8960_remove(struct platform_device *pdev)
567{
568 qcom_cc_remove(pdev);
569 return 0;
570}
571
572static struct platform_driver lcc_msm8960_driver = {
573 .probe = lcc_msm8960_probe,
574 .remove = lcc_msm8960_remove,
575 .driver = {
576 .name = "lcc-msm8960",
577 .owner = THIS_MODULE,
578 .of_match_table = lcc_msm8960_match_table,
579 },
580};
581module_platform_driver(lcc_msm8960_driver);
582
583MODULE_DESCRIPTION("QCOM LCC MSM8960 Driver");
584MODULE_LICENSE("GPL v2");
585MODULE_ALIAS("platform:lcc-msm8960");
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index cbcddcc02475..05d7a0bc0599 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -535,44 +535,44 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
 	COMPOSITE(0, "uart0_src", mux_pll_src_cpll_gll_usb_npll_p, 0,
 			RK3288_CLKSEL_CON(13), 13, 2, MFLAGS, 0, 7, DFLAGS,
 			RK3288_CLKGATE_CON(1), 8, GFLAGS),
-	COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", 0,
+	COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(17), 0,
 			RK3288_CLKGATE_CON(1), 9, GFLAGS),
-	MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, 0,
+	MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(13), 8, 2, MFLAGS),
 	MUX(0, "uart_src", mux_pll_src_cpll_gpll_p, 0,
 			RK3288_CLKSEL_CON(13), 15, 1, MFLAGS),
 	COMPOSITE_NOMUX(0, "uart1_src", "uart_src", 0,
 			RK3288_CLKSEL_CON(14), 0, 7, DFLAGS,
 			RK3288_CLKGATE_CON(1), 10, GFLAGS),
-	COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", 0,
+	COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(18), 0,
 			RK3288_CLKGATE_CON(1), 11, GFLAGS),
-	MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, 0,
+	MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(14), 8, 2, MFLAGS),
 	COMPOSITE_NOMUX(0, "uart2_src", "uart_src", 0,
 			RK3288_CLKSEL_CON(15), 0, 7, DFLAGS,
 			RK3288_CLKGATE_CON(1), 12, GFLAGS),
-	COMPOSITE_FRAC(0, "uart2_frac", "uart2_src", 0,
+	COMPOSITE_FRAC(0, "uart2_frac", "uart2_src", CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(19), 0,
 			RK3288_CLKGATE_CON(1), 13, GFLAGS),
-	MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, 0,
+	MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(15), 8, 2, MFLAGS),
 	COMPOSITE_NOMUX(0, "uart3_src", "uart_src", 0,
 			RK3288_CLKSEL_CON(16), 0, 7, DFLAGS,
 			RK3288_CLKGATE_CON(1), 14, GFLAGS),
-	COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", 0,
+	COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(20), 0,
 			RK3288_CLKGATE_CON(1), 15, GFLAGS),
-	MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, 0,
+	MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(16), 8, 2, MFLAGS),
 	COMPOSITE_NOMUX(0, "uart4_src", "uart_src", 0,
 			RK3288_CLKSEL_CON(3), 0, 7, DFLAGS,
 			RK3288_CLKGATE_CON(2), 12, GFLAGS),
-	COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", 0,
+	COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(7), 0,
 			RK3288_CLKGATE_CON(2), 13, GFLAGS),
-	MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, 0,
+	MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(3), 8, 2, MFLAGS),
 
 	COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0,
@@ -598,7 +598,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
 	GATE(0, "jtag", "ext_jtag", 0,
 			RK3288_CLKGATE_CON(4), 14, GFLAGS),
 
-	COMPOSITE_NODIV(0, "usbphy480m_src", mux_usbphy480m_p, 0,
+	COMPOSITE_NODIV(SCLK_USBPHY480M_SRC, "usbphy480m_src", mux_usbphy480m_p, 0,
 			RK3288_CLKSEL_CON(13), 11, 2, MFLAGS,
 			RK3288_CLKGATE_CON(5), 14, GFLAGS),
 	COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
@@ -704,8 +704,8 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
 
 	GATE(SCLK_LCDC_PWM0, "sclk_lcdc_pwm0", "xin24m", 0, RK3288_CLKGATE_CON(13), 10, GFLAGS),
 	GATE(SCLK_LCDC_PWM1, "sclk_lcdc_pwm1", "xin24m", 0, RK3288_CLKGATE_CON(13), 11, GFLAGS),
-	GATE(0, "sclk_pvtm_core", "xin24m", 0, RK3288_CLKGATE_CON(5), 9, GFLAGS),
-	GATE(0, "sclk_pvtm_gpu", "xin24m", 0, RK3288_CLKGATE_CON(5), 10, GFLAGS),
+	GATE(SCLK_PVTM_CORE, "sclk_pvtm_core", "xin24m", 0, RK3288_CLKGATE_CON(5), 9, GFLAGS),
+	GATE(SCLK_PVTM_GPU, "sclk_pvtm_gpu", "xin24m", 0, RK3288_CLKGATE_CON(5), 10, GFLAGS),
 	GATE(0, "sclk_mipidsi_24m", "xin24m", 0, RK3288_CLKGATE_CON(5), 15, GFLAGS),
 
 	/* sclk_gpu gates */
@@ -805,6 +805,20 @@ static int rk3288_clk_suspend(void)
 		rk3288_saved_cru_regs[i] =
 				readl_relaxed(rk3288_cru_base + reg_id);
 	}
+
+	/*
+	 * Switch PLLs other than DPLL (for SDRAM) to slow mode to
+	 * avoid crashes on resume. The Mask ROM on the system will
+	 * put APLL, CPLL, and GPLL into slow mode at resume time
+	 * anyway (which is why we restore them), but we might not
+	 * even make it to the Mask ROM if this isn't done at suspend
+	 * time.
+	 *
+	 * NOTE: only APLL truly matters here, but we'll do them all.
+	 */
+
+	writel_relaxed(0xf3030000, rk3288_cru_base + RK3288_MODE_CON);
+
 	return 0;
 }
 
@@ -866,6 +880,14 @@ static void __init rk3288_clk_init(struct device_node *np)
 		pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n",
 			__func__, PTR_ERR(clk));
 
+	/* Watchdog pclk is controlled by RK3288_SGRF_SOC_CON0[1]. */
+	clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1);
+	if (IS_ERR(clk))
+		pr_warn("%s: could not register clock pclk_wdt: %ld\n",
+			__func__, PTR_ERR(clk));
+	else
+		rockchip_clk_add_lookup(clk, PCLK_WDT);
+
 	rockchip_clk_register_plls(rk3288_pll_clks,
 				   ARRAY_SIZE(rk3288_pll_clks),
 				   RK3288_GRF_SOC_STATUS1);
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index f2c2ccce49bb..454b02ae486a 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -82,6 +82,26 @@ static const struct of_device_id exynos_audss_clk_of_match[] = {
 	{},
 };
 
+static void exynos_audss_clk_teardown(void)
+{
+	int i;
+
+	for (i = EXYNOS_MOUT_AUDSS; i < EXYNOS_DOUT_SRP; i++) {
+		if (!IS_ERR(clk_table[i]))
+			clk_unregister_mux(clk_table[i]);
+	}
+
+	for (; i < EXYNOS_SRP_CLK; i++) {
+		if (!IS_ERR(clk_table[i]))
+			clk_unregister_divider(clk_table[i]);
+	}
+
+	for (; i < clk_data.clk_num; i++) {
+		if (!IS_ERR(clk_table[i]))
+			clk_unregister_gate(clk_table[i]);
+	}
+}
+
 /* register exynos_audss clocks */
 static int exynos_audss_clk_probe(struct platform_device *pdev)
 {
@@ -219,10 +239,7 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
 	return 0;
 
 unregister:
-	for (i = 0; i < clk_data.clk_num; i++) {
-		if (!IS_ERR(clk_table[i]))
-			clk_unregister(clk_table[i]);
-	}
+	exynos_audss_clk_teardown();
 
 	if (!IS_ERR(epll))
 		clk_disable_unprepare(epll);
@@ -232,18 +249,13 @@ unregister:
 
 static int exynos_audss_clk_remove(struct platform_device *pdev)
 {
-	int i;
-
 #ifdef CONFIG_PM_SLEEP
 	unregister_syscore_ops(&exynos_audss_clk_syscore_ops);
 #endif
 
 	of_clk_del_provider(pdev->dev.of_node);
 
-	for (i = 0; i < clk_data.clk_num; i++) {
-		if (!IS_ERR(clk_table[i]))
-			clk_unregister(clk_table[i]);
-	}
+	exynos_audss_clk_teardown();
 
 	if (!IS_ERR(epll))
 		clk_disable_unprepare(epll);
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index 6e6cca392082..cc4c348d8a24 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -104,27 +104,6 @@
 #define PWR_CTRL1_USE_CORE1_WFI		(1 << 1)
 #define PWR_CTRL1_USE_CORE0_WFI		(1 << 0)
 
-/* list of PLLs to be registered */
-enum exynos3250_plls {
-	apll, mpll, vpll, upll,
-	nr_plls
-};
-
-/* list of PLLs in DMC block to be registered */
-enum exynos3250_dmc_plls {
-	bpll, epll,
-	nr_dmc_plls
-};
-
-static void __iomem *reg_base;
-static void __iomem *dmc_reg_base;
-
-/*
- * Support for CMU save/restore across system suspends
- */
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos3250_clk_regs;
-
 static unsigned long exynos3250_cmu_clk_regs[] __initdata = {
 	SRC_LEFTBUS,
 	DIV_LEFTBUS,
@@ -195,43 +174,6 @@ static unsigned long exynos3250_cmu_clk_regs[] __initdata = {
 	PWR_CTRL2,
 };
 
-static int exynos3250_clk_suspend(void)
-{
-	samsung_clk_save(reg_base, exynos3250_clk_regs,
-				ARRAY_SIZE(exynos3250_cmu_clk_regs));
-	return 0;
-}
-
-static void exynos3250_clk_resume(void)
-{
-	samsung_clk_restore(reg_base, exynos3250_clk_regs,
-				ARRAY_SIZE(exynos3250_cmu_clk_regs));
-}
-
-static struct syscore_ops exynos3250_clk_syscore_ops = {
-	.suspend = exynos3250_clk_suspend,
-	.resume = exynos3250_clk_resume,
-};
-
-static void exynos3250_clk_sleep_init(void)
-{
-	exynos3250_clk_regs =
-		samsung_clk_alloc_reg_dump(exynos3250_cmu_clk_regs,
-					ARRAY_SIZE(exynos3250_cmu_clk_regs));
-	if (!exynos3250_clk_regs) {
-		pr_warn("%s: Failed to allocate sleep save data\n", __func__);
-		goto err;
-	}
-
-	register_syscore_ops(&exynos3250_clk_syscore_ops);
-	return;
-err:
-	kfree(exynos3250_clk_regs);
-}
-#else
-static inline void exynos3250_clk_sleep_init(void) { }
-#endif
-
 /* list of all parent clock list */
 PNAME(mout_vpllsrc_p)	= { "fin_pll", };
 
@@ -782,18 +724,18 @@ static struct samsung_pll_rate_table exynos3250_vpll_rates[] = {
 	{ /* sentinel */ }
 };
 
-static struct samsung_pll_clock exynos3250_plls[nr_plls] __initdata = {
-	[apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
-			APLL_LOCK, APLL_CON0, NULL),
-	[mpll] = PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
-			MPLL_LOCK, MPLL_CON0, NULL),
-	[vpll] = PLL(pll_36xx, CLK_FOUT_VPLL, "fout_vpll", "fin_pll",
-			VPLL_LOCK, VPLL_CON0, NULL),
-	[upll] = PLL(pll_35xx, CLK_FOUT_UPLL, "fout_upll", "fin_pll",
-			UPLL_LOCK, UPLL_CON0, NULL),
+static struct samsung_pll_clock exynos3250_plls[] __initdata = {
+	PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
+		APLL_LOCK, APLL_CON0, exynos3250_pll_rates),
+	PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
+		MPLL_LOCK, MPLL_CON0, exynos3250_pll_rates),
+	PLL(pll_36xx, CLK_FOUT_VPLL, "fout_vpll", "fin_pll",
+		VPLL_LOCK, VPLL_CON0, exynos3250_vpll_rates),
+	PLL(pll_35xx, CLK_FOUT_UPLL, "fout_upll", "fin_pll",
+		UPLL_LOCK, UPLL_CON0, exynos3250_pll_rates),
 };
 
-static void __init exynos3_core_down_clock(void)
+static void __init exynos3_core_down_clock(void __iomem *reg_base)
 {
 	unsigned int tmp;
 
@@ -814,38 +756,31 @@ static void __init exynos3_core_down_clock(void)
 	__raw_writel(0x0, reg_base + PWR_CTRL2);
 }
 
+static struct samsung_cmu_info cmu_info __initdata = {
+	.pll_clks = exynos3250_plls,
+	.nr_pll_clks = ARRAY_SIZE(exynos3250_plls),
+	.mux_clks = mux_clks,
+	.nr_mux_clks = ARRAY_SIZE(mux_clks),
+	.div_clks = div_clks,
+	.nr_div_clks = ARRAY_SIZE(div_clks),
+	.gate_clks = gate_clks,
+	.nr_gate_clks = ARRAY_SIZE(gate_clks),
+	.fixed_factor_clks = fixed_factor_clks,
+	.nr_fixed_factor_clks = ARRAY_SIZE(fixed_factor_clks),
+	.nr_clk_ids = CLK_NR_CLKS,
+	.clk_regs = exynos3250_cmu_clk_regs,
+	.nr_clk_regs = ARRAY_SIZE(exynos3250_cmu_clk_regs),
+};
+
 static void __init exynos3250_cmu_init(struct device_node *np)
 {
 	struct samsung_clk_provider *ctx;
 
-	reg_base = of_iomap(np, 0);
-	if (!reg_base)
-		panic("%s: failed to map registers\n", __func__);
-
-	ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
+	ctx = samsung_cmu_register_one(np, &cmu_info);
 	if (!ctx)
-		panic("%s: unable to allocate context.\n", __func__);
-
-	samsung_clk_register_fixed_factor(ctx, fixed_factor_clks,
-					ARRAY_SIZE(fixed_factor_clks));
-
-	exynos3250_plls[apll].rate_table = exynos3250_pll_rates;
-	exynos3250_plls[mpll].rate_table = exynos3250_pll_rates;
-	exynos3250_plls[vpll].rate_table = exynos3250_vpll_rates;
-	exynos3250_plls[upll].rate_table = exynos3250_pll_rates;
-
-	samsung_clk_register_pll(ctx, exynos3250_plls,
-				ARRAY_SIZE(exynos3250_plls), reg_base);
-
-	samsung_clk_register_mux(ctx, mux_clks, ARRAY_SIZE(mux_clks));
-	samsung_clk_register_div(ctx, div_clks, ARRAY_SIZE(div_clks));
-	samsung_clk_register_gate(ctx, gate_clks, ARRAY_SIZE(gate_clks));
-
-	exynos3_core_down_clock();
+		return;
 
-	exynos3250_clk_sleep_init();
-
-	samsung_clk_of_add_provider(np, ctx);
+	exynos3_core_down_clock(ctx->reg_base);
 }
 CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init);
 
@@ -872,12 +807,6 @@ CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init);
 #define EPLL_CON2		0x111c
 #define SRC_EPLL		0x1120
 
-/*
- * Support for CMU save/restore across system suspends
- */
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos3250_dmc_clk_regs;
-
 static unsigned long exynos3250_cmu_dmc_clk_regs[] __initdata = {
 	BPLL_LOCK,
 	BPLL_CON0,
@@ -899,43 +828,6 @@ static unsigned long exynos3250_cmu_dmc_clk_regs[] __initdata = {
 	SRC_EPLL,
 };
 
-static int exynos3250_dmc_clk_suspend(void)
-{
-	samsung_clk_save(dmc_reg_base, exynos3250_dmc_clk_regs,
-				ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
-	return 0;
-}
-
-static void exynos3250_dmc_clk_resume(void)
-{
-	samsung_clk_restore(dmc_reg_base, exynos3250_dmc_clk_regs,
-				ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
-}
-
-static struct syscore_ops exynos3250_dmc_clk_syscore_ops = {
-	.suspend = exynos3250_dmc_clk_suspend,
-	.resume = exynos3250_dmc_clk_resume,
-};
-
-static void exynos3250_dmc_clk_sleep_init(void)
-{
-	exynos3250_dmc_clk_regs =
-		samsung_clk_alloc_reg_dump(exynos3250_cmu_dmc_clk_regs,
-					ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
-	if (!exynos3250_dmc_clk_regs) {
-		pr_warn("%s: Failed to allocate sleep save data\n", __func__);
-		goto err;
-	}
-
-	register_syscore_ops(&exynos3250_dmc_clk_syscore_ops);
-	return;
-err:
-	kfree(exynos3250_dmc_clk_regs);
-}
-#else
-static inline void exynos3250_dmc_clk_sleep_init(void) { }
-#endif
-
 PNAME(mout_epll_p)	= { "fin_pll", "fout_epll", };
 PNAME(mout_bpll_p)	= { "fin_pll", "fout_bpll", };
 PNAME(mout_mpll_mif_p)	= { "fin_pll", "sclk_mpll_mif", };
@@ -977,43 +869,28 @@ static struct samsung_div_clock dmc_div_clks[] __initdata = {
 	DIV(CLK_DIV_DMCD, "div_dmcd", "div_dmc", DIV_DMC1, 11, 3),
 };
 
-static struct samsung_pll_clock exynos3250_dmc_plls[nr_dmc_plls] __initdata = {
-	[bpll] = PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll",
-			BPLL_LOCK, BPLL_CON0, NULL),
-	[epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
-			EPLL_LOCK, EPLL_CON0, NULL),
+static struct samsung_pll_clock exynos3250_dmc_plls[] __initdata = {
+	PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll",
+		BPLL_LOCK, BPLL_CON0, exynos3250_pll_rates),
+	PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
+		EPLL_LOCK, EPLL_CON0, exynos3250_epll_rates),
+};
+
+static struct samsung_cmu_info dmc_cmu_info __initdata = {
+	.pll_clks = exynos3250_dmc_plls,
+	.nr_pll_clks = ARRAY_SIZE(exynos3250_dmc_plls),
+	.mux_clks = dmc_mux_clks,
+	.nr_mux_clks = ARRAY_SIZE(dmc_mux_clks),
+	.div_clks = dmc_div_clks,
+	.nr_div_clks = ARRAY_SIZE(dmc_div_clks),
+	.nr_clk_ids = NR_CLKS_DMC,
+	.clk_regs = exynos3250_cmu_dmc_clk_regs,
+	.nr_clk_regs = ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs),
 };
 
 static void __init exynos3250_cmu_dmc_init(struct device_node *np)
 {
-	struct samsung_clk_provider *ctx;
-
-	dmc_reg_base = of_iomap(np, 0);
-	if (!dmc_reg_base)
-		panic("%s: failed to map registers\n", __func__);
-
-	ctx = samsung_clk_init(np, dmc_reg_base, NR_CLKS_DMC);
-	if (!ctx)
-		panic("%s: unable to allocate context.\n", __func__);
-
-	exynos3250_dmc_plls[bpll].rate_table = exynos3250_pll_rates;
-	exynos3250_dmc_plls[epll].rate_table = exynos3250_epll_rates;
-
-	pr_err("CLK registering epll bpll: %d, %d, %d, %d\n",
-		exynos3250_dmc_plls[bpll].rate_table[0].rate,
-		exynos3250_dmc_plls[bpll].rate_table[0].mdiv,
-		exynos3250_dmc_plls[bpll].rate_table[0].pdiv,
-		exynos3250_dmc_plls[bpll].rate_table[0].sdiv
-		);
-	samsung_clk_register_pll(ctx, exynos3250_dmc_plls,
-				ARRAY_SIZE(exynos3250_dmc_plls), dmc_reg_base);
-
-	samsung_clk_register_mux(ctx, dmc_mux_clks, ARRAY_SIZE(dmc_mux_clks));
-	samsung_clk_register_div(ctx, dmc_div_clks, ARRAY_SIZE(dmc_div_clks));
-
-	exynos3250_dmc_clk_sleep_init();
-
-	samsung_clk_of_add_provider(np, ctx);
+	samsung_cmu_register_one(np, &dmc_cmu_info);
 }
 CLK_OF_DECLARE(exynos3250_cmu_dmc, "samsung,exynos3250-cmu-dmc",
 		exynos3250_cmu_dmc_init);
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 88e8c6bbd77f..51462e85675f 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -703,12 +703,12 @@ static struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
 
 /* list of divider clocks supported in all exynos4 soc's */
 static struct samsung_div_clock exynos4_div_clks[] __initdata = {
-	DIV(0, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 3),
+	DIV(CLK_DIV_GDL, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 3),
 	DIV(0, "div_gpl", "div_gdl", DIV_LEFTBUS, 4, 3),
 	DIV(0, "div_clkout_leftbus", "mout_clkout_leftbus",
 			CLKOUT_CMU_LEFTBUS, 8, 6),
 
-	DIV(0, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 3),
+	DIV(CLK_DIV_GDR, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 3),
 	DIV(0, "div_gpr", "div_gdr", DIV_RIGHTBUS, 4, 3),
 	DIV(0, "div_clkout_rightbus", "mout_clkout_rightbus",
 			CLKOUT_CMU_RIGHTBUS, 8, 6),
@@ -781,10 +781,10 @@ static struct samsung_div_clock exynos4_div_clks[] __initdata = {
 			CLK_SET_RATE_PARENT, 0),
 	DIV(0, "div_clkout_top", "mout_clkout_top", CLKOUT_CMU_TOP, 8, 6),
 
-	DIV(0, "div_acp", "mout_dmc_bus", DIV_DMC0, 0, 3),
+	DIV(CLK_DIV_ACP, "div_acp", "mout_dmc_bus", DIV_DMC0, 0, 3),
 	DIV(0, "div_acp_pclk", "div_acp", DIV_DMC0, 4, 3),
 	DIV(0, "div_dphy", "mout_dphy", DIV_DMC0, 8, 3),
-	DIV(0, "div_dmc", "mout_dmc_bus", DIV_DMC0, 12, 3),
+	DIV(CLK_DIV_DMC, "div_dmc", "mout_dmc_bus", DIV_DMC0, 12, 3),
 	DIV(0, "div_dmcd", "div_dmc", DIV_DMC0, 16, 3),
 	DIV(0, "div_dmcp", "div_dmcd", DIV_DMC0, 20, 3),
 	DIV(0, "div_pwi", "mout_pwi", DIV_DMC1, 8, 4),
@@ -829,7 +829,7 @@ static struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
 	DIV_F(CLK_DIV_MCUISP1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
 			8, 3, CLK_GET_RATE_NOCACHE, 0),
 	DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
-	DIV(0, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3),
+	DIV(CLK_DIV_C2C, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3),
 	DIV(0, "div_c2c_aclk", "div_c2c", DIV_DMC1, 12, 3),
 };
 
diff --git a/drivers/clk/samsung/clk-exynos4415.c b/drivers/clk/samsung/clk-exynos4415.c
index 2123fc251e0f..6c78b09c829f 100644
--- a/drivers/clk/samsung/clk-exynos4415.c
+++ b/drivers/clk/samsung/clk-exynos4415.c
@@ -113,19 +113,6 @@
 #define DIV_CPU0		0x14500
 #define DIV_CPU1		0x14504
 
-enum exynos4415_plls {
-	apll, epll, g3d_pll, isp_pll, disp_pll,
-	nr_plls,
-};
-
-static struct samsung_clk_provider *exynos4415_ctx;
-
-/*
- * Support for CMU save/restore across system suspends
- */
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos4415_clk_regs;
-
 static unsigned long exynos4415_cmu_clk_regs[] __initdata = {
 	SRC_LEFTBUS,
 	DIV_LEFTBUS,
@@ -219,41 +206,6 @@ static unsigned long exynos4415_cmu_clk_regs[] __initdata = {
 	DIV_CPU1,
 };
 
-static int exynos4415_clk_suspend(void)
-{
-	samsung_clk_save(exynos4415_ctx->reg_base, exynos4415_clk_regs,
-			ARRAY_SIZE(exynos4415_cmu_clk_regs));
-
-	return 0;
-}
-
-static void exynos4415_clk_resume(void)
-{
-	samsung_clk_restore(exynos4415_ctx->reg_base, exynos4415_clk_regs,
-			ARRAY_SIZE(exynos4415_cmu_clk_regs));
-}
-
-static struct syscore_ops exynos4415_clk_syscore_ops = {
-	.suspend = exynos4415_clk_suspend,
-	.resume = exynos4415_clk_resume,
-};
-
-static void exynos4415_clk_sleep_init(void)
-{
-	exynos4415_clk_regs =
-		samsung_clk_alloc_reg_dump(exynos4415_cmu_clk_regs,
-					ARRAY_SIZE(exynos4415_cmu_clk_regs));
-	if (!exynos4415_clk_regs) {
-		pr_warn("%s: Failed to allocate sleep save data\n", __func__);
-		return;
-	}
-
-	register_syscore_ops(&exynos4415_clk_syscore_ops);
-}
-#else
-static inline void exynos4415_clk_sleep_init(void) { }
-#endif
-
 /* list of all parent clock list */
 PNAME(mout_g3d_pllsrc_p)	= { "fin_pll", };
 
@@ -959,56 +911,40 @@ static struct samsung_pll_rate_table exynos4415_epll_rates[] = {
 	{ /* sentinel */ }
 };
 
-static struct samsung_pll_clock exynos4415_plls[nr_plls] __initdata = {
-	[apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
-		APLL_LOCK, APLL_CON0, NULL),
-	[epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
-		EPLL_LOCK, EPLL_CON0, NULL),
-	[g3d_pll] = PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll",
-		"mout_g3d_pllsrc", G3D_PLL_LOCK, G3D_PLL_CON0, NULL),
-	[isp_pll] = PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "fin_pll",
-		ISP_PLL_LOCK, ISP_PLL_CON0, NULL),
-	[disp_pll] = PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll",
-		"fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, NULL),
+static struct samsung_pll_clock exynos4415_plls[] __initdata = {
+	PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
+		APLL_LOCK, APLL_CON0, exynos4415_pll_rates),
+	PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
+		EPLL_LOCK, EPLL_CON0, exynos4415_epll_rates),
+	PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll", "mout_g3d_pllsrc",
+		G3D_PLL_LOCK, G3D_PLL_CON0, exynos4415_pll_rates),
+	PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "fin_pll",
+		ISP_PLL_LOCK, ISP_PLL_CON0, exynos4415_pll_rates),
+	PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll",
+		"fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, exynos4415_pll_rates),
+};
+
+static struct samsung_cmu_info cmu_info __initdata = {
+	.pll_clks = exynos4415_plls,
+	.nr_pll_clks = ARRAY_SIZE(exynos4415_plls),
+	.mux_clks = exynos4415_mux_clks,
+	.nr_mux_clks = ARRAY_SIZE(exynos4415_mux_clks),
+	.div_clks = exynos4415_div_clks,
+	.nr_div_clks = ARRAY_SIZE(exynos4415_div_clks),
+	.gate_clks = exynos4415_gate_clks,
+	.nr_gate_clks = ARRAY_SIZE(exynos4415_gate_clks),
+	.fixed_clks = exynos4415_fixed_rate_clks,
+	.nr_fixed_clks = ARRAY_SIZE(exynos4415_fixed_rate_clks),
+	.fixed_factor_clks = exynos4415_fixed_factor_clks,
+	.nr_fixed_factor_clks = ARRAY_SIZE(exynos4415_fixed_factor_clks),
+	.nr_clk_ids = CLK_NR_CLKS,
+	.clk_regs = exynos4415_cmu_clk_regs,
+	.nr_clk_regs = ARRAY_SIZE(exynos4415_cmu_clk_regs),
 };
 
 static void __init exynos4415_cmu_init(struct device_node *np)
 {
-	void __iomem *reg_base;
-
-	reg_base = of_iomap(np, 0);
-	if (!reg_base)
-		panic("%s: failed to map registers\n", __func__);
-
-	exynos4415_ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
-	if (!exynos4415_ctx)
-		panic("%s: unable to allocate context.\n", __func__);
-
-	exynos4415_plls[apll].rate_table = exynos4415_pll_rates;
-	exynos4415_plls[epll].rate_table = exynos4415_epll_rates;
-	exynos4415_plls[g3d_pll].rate_table = exynos4415_pll_rates;
-	exynos4415_plls[isp_pll].rate_table = exynos4415_pll_rates;
-	exynos4415_plls[disp_pll].rate_table = exynos4415_pll_rates;
-
-	samsung_clk_register_fixed_factor(exynos4415_ctx,
-				exynos4415_fixed_factor_clks,
-				ARRAY_SIZE(exynos4415_fixed_factor_clks));
-	samsung_clk_register_fixed_rate(exynos4415_ctx,
-				exynos4415_fixed_rate_clks,
-				ARRAY_SIZE(exynos4415_fixed_rate_clks));
-
-	samsung_clk_register_pll(exynos4415_ctx, exynos4415_plls,
-				ARRAY_SIZE(exynos4415_plls), reg_base);
-	samsung_clk_register_mux(exynos4415_ctx, exynos4415_mux_clks,
-				ARRAY_SIZE(exynos4415_mux_clks));
-	samsung_clk_register_div(exynos4415_ctx, exynos4415_div_clks,
-				ARRAY_SIZE(exynos4415_div_clks));
-	samsung_clk_register_gate(exynos4415_ctx, exynos4415_gate_clks,
-				ARRAY_SIZE(exynos4415_gate_clks));
-
-	exynos4415_clk_sleep_init();
-
-	samsung_clk_of_add_provider(np, exynos4415_ctx);
+	samsung_cmu_register_one(np, &cmu_info);
 }
 CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init);
 
@@ -1027,16 +963,6 @@ CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init);
 #define SRC_DMC			0x300
 #define DIV_DMC1		0x504
 
-enum exynos4415_dmc_plls {
-	mpll, bpll,
-	nr_dmc_plls,
-};
-
-static struct samsung_clk_provider *exynos4415_dmc_ctx;
-
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos4415_dmc_clk_regs;
-
 static unsigned long exynos4415_cmu_dmc_clk_regs[] __initdata = {
 	MPLL_LOCK,
 	MPLL_CON0,
@@ -1050,42 +976,6 @@ static unsigned long exynos4415_cmu_dmc_clk_regs[] __initdata = {
 	DIV_DMC1,
 };
 
-static int exynos4415_dmc_clk_suspend(void)
-{
-	samsung_clk_save(exynos4415_dmc_ctx->reg_base,
-			exynos4415_dmc_clk_regs,
-			ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
-	return 0;
-}
-
-static void exynos4415_dmc_clk_resume(void)
-{
-	samsung_clk_restore(exynos4415_dmc_ctx->reg_base,
-			exynos4415_dmc_clk_regs,
-			ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
-}
-
-static struct syscore_ops exynos4415_dmc_clk_syscore_ops = {
-	.suspend = exynos4415_dmc_clk_suspend,
-	.resume = exynos4415_dmc_clk_resume,
-};
-
-static void exynos4415_dmc_clk_sleep_init(void)
-{
-	exynos4415_dmc_clk_regs =
-		samsung_clk_alloc_reg_dump(exynos4415_cmu_dmc_clk_regs,
-					ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
-	if (!exynos4415_dmc_clk_regs) {
-		pr_warn("%s: Failed to allocate sleep save data\n", __func__);
-		return;
-	}
-
-	register_syscore_ops(&exynos4415_dmc_clk_syscore_ops);
-}
-#else
-static inline void exynos4415_dmc_clk_sleep_init(void) { }
-#endif /* CONFIG_PM_SLEEP */
-
 PNAME(mout_mpll_p)	= { "fin_pll", "fout_mpll", };
 PNAME(mout_bpll_p)	= { "fin_pll", "fout_bpll", };
 PNAME(mbpll_p)		= { "mout_mpll", "mout_bpll", };
@@ -1107,38 +997,28 @@ static struct samsung_div_clock exynos4415_dmc_div_clks[] __initdata = {
 	DIV(CLK_DMC_DIV_MPLL_PRE, "div_mpll_pre", "mout_mpll", DIV_DMC1, 8, 2),
 };
 
-static struct samsung_pll_clock exynos4415_dmc_plls[nr_dmc_plls] __initdata = {
-	[mpll] = PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll",
-		MPLL_LOCK, MPLL_CON0, NULL),
-	[bpll] = PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll",
-		BPLL_LOCK, BPLL_CON0, NULL),
+static struct samsung_pll_clock exynos4415_dmc_plls[] __initdata = {
+	PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll",
+		MPLL_LOCK, MPLL_CON0, exynos4415_pll_rates),
+	PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll",
+		BPLL_LOCK, BPLL_CON0, exynos4415_pll_rates),
+};
+
+static struct samsung_cmu_info cmu_dmc_info __initdata = {
+	.pll_clks = exynos4415_dmc_plls,
+	.nr_pll_clks = ARRAY_SIZE(exynos4415_dmc_plls),
+	.mux_clks = exynos4415_dmc_mux_clks,
+	.nr_mux_clks = ARRAY_SIZE(exynos4415_dmc_mux_clks),
+	.div_clks = exynos4415_dmc_div_clks,
+	.nr_div_clks = ARRAY_SIZE(exynos4415_dmc_div_clks),
+	.nr_clk_ids = NR_CLKS_DMC,
+	.clk_regs = exynos4415_cmu_dmc_clk_regs,
+	.nr_clk_regs = ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs),
 };
 
 static void __init exynos4415_cmu_dmc_init(struct device_node *np)
 {
-	void __iomem *reg_base;
-
-	reg_base = of_iomap(np, 0);
-	if (!reg_base)
-		panic("%s: failed to map registers\n", __func__);
-
-	exynos4415_dmc_ctx = samsung_clk_init(np, reg_base, NR_CLKS_DMC);
-	if (!exynos4415_dmc_ctx)
-		panic("%s: unable to allocate context.\n", __func__);
-
-	exynos4415_dmc_plls[mpll].rate_table = exynos4415_pll_rates;
-	exynos4415_dmc_plls[bpll].rate_table = exynos4415_pll_rates;
-
-	samsung_clk_register_pll(exynos4415_dmc_ctx, exynos4415_dmc_plls,
-				ARRAY_SIZE(exynos4415_dmc_plls), reg_base);
-	samsung_clk_register_mux(exynos4415_dmc_ctx, exynos4415_dmc_mux_clks,
-				ARRAY_SIZE(exynos4415_dmc_mux_clks));
-	samsung_clk_register_div(exynos4415_dmc_ctx, exynos4415_dmc_div_clks,
-				ARRAY_SIZE(exynos4415_dmc_div_clks));
-
-	exynos4415_dmc_clk_sleep_init();
-
-	samsung_clk_of_add_provider(np, exynos4415_dmc_ctx);
+	samsung_cmu_register_one(np, &cmu_dmc_info);
 }
 CLK_OF_DECLARE(exynos4415_cmu_dmc, "samsung,exynos4415-cmu-dmc",
 		exynos4415_cmu_dmc_init);
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
index ea4483b8d62e..03d36e847b78 100644
--- a/drivers/clk/samsung/clk-exynos7.c
+++ b/drivers/clk/samsung/clk-exynos7.c
@@ -34,6 +34,7 @@
 #define DIV_TOPC0		0x0600
 #define DIV_TOPC1		0x0604
 #define DIV_TOPC3		0x060C
+#define ENABLE_ACLK_TOPC1	0x0804
 
 static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = {
 	FFACTOR(0, "ffac_topc_bus0_pll_div2", "mout_bus0_pll_ctrl", 1, 2, 0),
@@ -45,6 +46,7 @@ static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = {
 };
 
 /* List of parent clocks for Muxes in CMU_TOPC */
+PNAME(mout_aud_pll_ctrl_p)	= { "fin_pll", "fout_aud_pll" };
 PNAME(mout_bus0_pll_ctrl_p)	= { "fin_pll", "fout_bus0_pll" };
 PNAME(mout_bus1_pll_ctrl_p)	= { "fin_pll", "fout_bus1_pll" };
 PNAME(mout_cc_pll_ctrl_p)	= { "fin_pll", "fout_cc_pll" };
@@ -104,9 +106,11 @@ static struct samsung_mux_clock topc_mux_clks[] __initdata = {
 
 	MUX(0, "mout_sclk_bus0_pll_out", mout_sclk_bus0_pll_out_p,
 		MUX_SEL_TOPC1, 16, 1),
+	MUX(0, "mout_aud_pll_ctrl", mout_aud_pll_ctrl_p, MUX_SEL_TOPC1, 0, 1),
 
 	MUX(0, "mout_aclk_ccore_133", mout_topc_group2, MUX_SEL_TOPC2, 4, 2),
 
+	MUX(0, "mout_aclk_mscl_532", mout_topc_group2, MUX_SEL_TOPC3, 20, 2),
 	MUX(0, "mout_aclk_peris_66", mout_topc_group2, MUX_SEL_TOPC3, 24, 2),
 };
 
@@ -114,6 +118,8 @@ static struct samsung_div_clock topc_div_clks[] __initdata = {
 	DIV(DOUT_ACLK_CCORE_133, "dout_aclk_ccore_133", "mout_aclk_ccore_133",
 		DIV_TOPC0, 4, 4),
 
+	DIV(DOUT_ACLK_MSCL_532, "dout_aclk_mscl_532", "mout_aclk_mscl_532",
+		DIV_TOPC1, 20, 4),
 	DIV(DOUT_ACLK_PERIS, "dout_aclk_peris_66", "mout_aclk_peris_66",
 		DIV_TOPC1, 24, 4),
 
@@ -125,6 +131,18 @@ static struct samsung_div_clock topc_div_clks[] __initdata = {
 		DIV_TOPC3, 12, 3),
 	DIV(DOUT_SCLK_MFC_PLL, "dout_sclk_mfc_pll", "mout_mfc_pll_ctrl",
 		DIV_TOPC3, 16, 3),
+	DIV(DOUT_SCLK_AUD_PLL, "dout_sclk_aud_pll", "mout_aud_pll_ctrl",
+		DIV_TOPC3, 28, 3),
+};
+
+static struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initdata = {
+	PLL_36XX_RATE(491520000, 20, 1, 0, 31457),
+	{},
+};
+
+static struct samsung_gate_clock topc_gate_clks[] __initdata = {
+	GATE(ACLK_MSCL_532, "aclk_mscl_532", "dout_aclk_mscl_532",
+		ENABLE_ACLK_TOPC1, 20, 0, 0),
 };
 
 static struct samsung_pll_clock topc_pll_clks[] __initdata = {
@@ -136,8 +154,8 @@ static struct samsung_pll_clock topc_pll_clks[] __initdata = {
 		BUS1_DPLL_CON0, NULL),
 	PLL(pll_1452x, 0, "fout_mfc_pll", "fin_pll", MFC_PLL_LOCK,
 		MFC_PLL_CON0, NULL),
-	PLL(pll_1460x, 0, "fout_aud_pll", "fin_pll", AUD_PLL_LOCK,
-		AUD_PLL_CON0, NULL),
+	PLL(pll_1460x, FOUT_AUD_PLL, "fout_aud_pll", "fin_pll", AUD_PLL_LOCK,
+		AUD_PLL_CON0, pll1460x_24mhz_tbl),
 };
 
 static struct samsung_cmu_info topc_cmu_info __initdata = {
@@ -147,6 +165,8 @@ static struct samsung_cmu_info topc_cmu_info __initdata = {
 	.nr_mux_clks = ARRAY_SIZE(topc_mux_clks),
 	.div_clks = topc_div_clks,
 	.nr_div_clks = ARRAY_SIZE(topc_div_clks),
+	.gate_clks = topc_gate_clks,
+	.nr_gate_clks = ARRAY_SIZE(topc_gate_clks),
 	.fixed_factor_clks = topc_fixed_factor_clks,
 	.nr_fixed_factor_clks = ARRAY_SIZE(topc_fixed_factor_clks),
 	.nr_clk_ids = TOPC_NR_CLK,
@@ -166,9 +186,18 @@ CLK_OF_DECLARE(exynos7_clk_topc, "samsung,exynos7-clock-topc",
 #define MUX_SEL_TOP00			0x0200
 #define MUX_SEL_TOP01			0x0204
 #define MUX_SEL_TOP03			0x020C
+#define MUX_SEL_TOP0_PERIC0		0x0230
+#define MUX_SEL_TOP0_PERIC1		0x0234
+#define MUX_SEL_TOP0_PERIC2		0x0238
 #define MUX_SEL_TOP0_PERIC3		0x023C
 #define DIV_TOP03			0x060C
+#define DIV_TOP0_PERIC0			0x0630
+#define DIV_TOP0_PERIC1			0x0634
+#define DIV_TOP0_PERIC2			0x0638
 #define DIV_TOP0_PERIC3			0x063C
+#define ENABLE_SCLK_TOP0_PERIC0		0x0A30
+#define ENABLE_SCLK_TOP0_PERIC1		0x0A34
+#define ENABLE_SCLK_TOP0_PERIC2		0x0A38
 #define ENABLE_SCLK_TOP0_PERIC3		0x0A3C
 
 /* List of parent clocks for Muxes in CMU_TOP0 */
@@ -176,6 +205,7 @@ PNAME(mout_bus0_pll_p) = { "fin_pll", "dout_sclk_bus0_pll" };
 PNAME(mout_bus1_pll_p)	= { "fin_pll", "dout_sclk_bus1_pll" };
 PNAME(mout_cc_pll_p)	= { "fin_pll", "dout_sclk_cc_pll" };
 PNAME(mout_mfc_pll_p)	= { "fin_pll", "dout_sclk_mfc_pll" };
+PNAME(mout_aud_pll_p)	= { "fin_pll", "dout_sclk_aud_pll" };
 
 PNAME(mout_top0_half_bus0_pll_p)	= {"mout_top0_bus0_pll",
 	"ffac_top0_bus0_pll_div2"};
@@ -189,18 +219,34 @@ PNAME(mout_top0_half_mfc_pll_p) = {"mout_top0_mfc_pll",
189PNAME(mout_top0_group1) = {"mout_top0_half_bus0_pll", 219PNAME(mout_top0_group1) = {"mout_top0_half_bus0_pll",
190 "mout_top0_half_bus1_pll", "mout_top0_half_cc_pll", 220 "mout_top0_half_bus1_pll", "mout_top0_half_cc_pll",
191 "mout_top0_half_mfc_pll"}; 221 "mout_top0_half_mfc_pll"};
222PNAME(mout_top0_group3) = {"ioclk_audiocdclk0",
223 "ioclk_audiocdclk1", "ioclk_spdif_extclk",
224 "mout_top0_aud_pll", "mout_top0_half_bus0_pll",
225 "mout_top0_half_bus1_pll"};
226PNAME(mout_top0_group4) = {"ioclk_audiocdclk1", "mout_top0_aud_pll",
227 "mout_top0_half_bus0_pll", "mout_top0_half_bus1_pll"};
192 228
193static unsigned long top0_clk_regs[] __initdata = { 229static unsigned long top0_clk_regs[] __initdata = {
194 MUX_SEL_TOP00, 230 MUX_SEL_TOP00,
195 MUX_SEL_TOP01, 231 MUX_SEL_TOP01,
196 MUX_SEL_TOP03, 232 MUX_SEL_TOP03,
233 MUX_SEL_TOP0_PERIC0,
234 MUX_SEL_TOP0_PERIC1,
235 MUX_SEL_TOP0_PERIC2,
197 MUX_SEL_TOP0_PERIC3, 236 MUX_SEL_TOP0_PERIC3,
198 DIV_TOP03, 237 DIV_TOP03,
238 DIV_TOP0_PERIC0,
239 DIV_TOP0_PERIC1,
240 DIV_TOP0_PERIC2,
199 DIV_TOP0_PERIC3, 241 DIV_TOP0_PERIC3,
242 ENABLE_SCLK_TOP0_PERIC0,
243 ENABLE_SCLK_TOP0_PERIC1,
244 ENABLE_SCLK_TOP0_PERIC2,
200 ENABLE_SCLK_TOP0_PERIC3, 245 ENABLE_SCLK_TOP0_PERIC3,
201}; 246};
202 247
203static struct samsung_mux_clock top0_mux_clks[] __initdata = { 248static struct samsung_mux_clock top0_mux_clks[] __initdata = {
249 MUX(0, "mout_top0_aud_pll", mout_aud_pll_p, MUX_SEL_TOP00, 0, 1),
204 MUX(0, "mout_top0_mfc_pll", mout_mfc_pll_p, MUX_SEL_TOP00, 4, 1), 250 MUX(0, "mout_top0_mfc_pll", mout_mfc_pll_p, MUX_SEL_TOP00, 4, 1),
205 MUX(0, "mout_top0_cc_pll", mout_cc_pll_p, MUX_SEL_TOP00, 8, 1), 251 MUX(0, "mout_top0_cc_pll", mout_cc_pll_p, MUX_SEL_TOP00, 8, 1),
206 MUX(0, "mout_top0_bus1_pll", mout_bus1_pll_p, MUX_SEL_TOP00, 12, 1), 252 MUX(0, "mout_top0_bus1_pll", mout_bus1_pll_p, MUX_SEL_TOP00, 12, 1),
@@ -218,10 +264,20 @@ static struct samsung_mux_clock top0_mux_clks[] __initdata = {
218 MUX(0, "mout_aclk_peric1_66", mout_top0_group1, MUX_SEL_TOP03, 12, 2), 264 MUX(0, "mout_aclk_peric1_66", mout_top0_group1, MUX_SEL_TOP03, 12, 2),
219 MUX(0, "mout_aclk_peric0_66", mout_top0_group1, MUX_SEL_TOP03, 20, 2), 265 MUX(0, "mout_aclk_peric0_66", mout_top0_group1, MUX_SEL_TOP03, 20, 2),
220 266
267 MUX(0, "mout_sclk_spdif", mout_top0_group3, MUX_SEL_TOP0_PERIC0, 4, 3),
268 MUX(0, "mout_sclk_pcm1", mout_top0_group4, MUX_SEL_TOP0_PERIC0, 8, 2),
269 MUX(0, "mout_sclk_i2s1", mout_top0_group4, MUX_SEL_TOP0_PERIC0, 20, 2),
270
271 MUX(0, "mout_sclk_spi1", mout_top0_group1, MUX_SEL_TOP0_PERIC1, 8, 2),
272 MUX(0, "mout_sclk_spi0", mout_top0_group1, MUX_SEL_TOP0_PERIC1, 20, 2),
273
274 MUX(0, "mout_sclk_spi3", mout_top0_group1, MUX_SEL_TOP0_PERIC2, 8, 2),
275 MUX(0, "mout_sclk_spi2", mout_top0_group1, MUX_SEL_TOP0_PERIC2, 20, 2),
221 MUX(0, "mout_sclk_uart3", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 4, 2), 276 MUX(0, "mout_sclk_uart3", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 4, 2),
222 MUX(0, "mout_sclk_uart2", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 8, 2), 277 MUX(0, "mout_sclk_uart2", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 8, 2),
223 MUX(0, "mout_sclk_uart1", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 12, 2), 278 MUX(0, "mout_sclk_uart1", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 12, 2),
224 MUX(0, "mout_sclk_uart0", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 16, 2), 279 MUX(0, "mout_sclk_uart0", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 16, 2),
280 MUX(0, "mout_sclk_spi4", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 20, 2),
225}; 281};
226 282
227static struct samsung_div_clock top0_div_clks[] __initdata = { 283static struct samsung_div_clock top0_div_clks[] __initdata = {
@@ -230,13 +286,40 @@ static struct samsung_div_clock top0_div_clks[] __initdata = {
230 DIV(DOUT_ACLK_PERIC0, "dout_aclk_peric0_66", "mout_aclk_peric0_66", 286 DIV(DOUT_ACLK_PERIC0, "dout_aclk_peric0_66", "mout_aclk_peric0_66",
231 DIV_TOP03, 20, 6), 287 DIV_TOP03, 20, 6),
232 288
289 DIV(0, "dout_sclk_spdif", "mout_sclk_spdif", DIV_TOP0_PERIC0, 4, 4),
290 DIV(0, "dout_sclk_pcm1", "mout_sclk_pcm1", DIV_TOP0_PERIC0, 8, 12),
291 DIV(0, "dout_sclk_i2s1", "mout_sclk_i2s1", DIV_TOP0_PERIC0, 20, 10),
292
293 DIV(0, "dout_sclk_spi1", "mout_sclk_spi1", DIV_TOP0_PERIC1, 8, 12),
294 DIV(0, "dout_sclk_spi0", "mout_sclk_spi0", DIV_TOP0_PERIC1, 20, 12),
295
296 DIV(0, "dout_sclk_spi3", "mout_sclk_spi3", DIV_TOP0_PERIC2, 8, 12),
297 DIV(0, "dout_sclk_spi2", "mout_sclk_spi2", DIV_TOP0_PERIC2, 20, 12),
298
233 DIV(0, "dout_sclk_uart3", "mout_sclk_uart3", DIV_TOP0_PERIC3, 4, 4), 299 DIV(0, "dout_sclk_uart3", "mout_sclk_uart3", DIV_TOP0_PERIC3, 4, 4),
234 DIV(0, "dout_sclk_uart2", "mout_sclk_uart2", DIV_TOP0_PERIC3, 8, 4), 300 DIV(0, "dout_sclk_uart2", "mout_sclk_uart2", DIV_TOP0_PERIC3, 8, 4),
235 DIV(0, "dout_sclk_uart1", "mout_sclk_uart1", DIV_TOP0_PERIC3, 12, 4), 301 DIV(0, "dout_sclk_uart1", "mout_sclk_uart1", DIV_TOP0_PERIC3, 12, 4),
236 DIV(0, "dout_sclk_uart0", "mout_sclk_uart0", DIV_TOP0_PERIC3, 16, 4), 302 DIV(0, "dout_sclk_uart0", "mout_sclk_uart0", DIV_TOP0_PERIC3, 16, 4),
303 DIV(0, "dout_sclk_spi4", "mout_sclk_spi4", DIV_TOP0_PERIC3, 20, 12),
237}; 304};
238 305
239static struct samsung_gate_clock top0_gate_clks[] __initdata = { 306static struct samsung_gate_clock top0_gate_clks[] __initdata = {
307 GATE(CLK_SCLK_SPDIF, "sclk_spdif", "dout_sclk_spdif",
308 ENABLE_SCLK_TOP0_PERIC0, 4, CLK_SET_RATE_PARENT, 0),
309 GATE(CLK_SCLK_PCM1, "sclk_pcm1", "dout_sclk_pcm1",
310 ENABLE_SCLK_TOP0_PERIC0, 8, CLK_SET_RATE_PARENT, 0),
311 GATE(CLK_SCLK_I2S1, "sclk_i2s1", "dout_sclk_i2s1",
312 ENABLE_SCLK_TOP0_PERIC0, 20, CLK_SET_RATE_PARENT, 0),
313
314 GATE(CLK_SCLK_SPI1, "sclk_spi1", "dout_sclk_spi1",
315 ENABLE_SCLK_TOP0_PERIC1, 8, CLK_SET_RATE_PARENT, 0),
316 GATE(CLK_SCLK_SPI0, "sclk_spi0", "dout_sclk_spi0",
317 ENABLE_SCLK_TOP0_PERIC1, 20, CLK_SET_RATE_PARENT, 0),
318
319 GATE(CLK_SCLK_SPI3, "sclk_spi3", "dout_sclk_spi3",
320 ENABLE_SCLK_TOP0_PERIC2, 8, CLK_SET_RATE_PARENT, 0),
321 GATE(CLK_SCLK_SPI2, "sclk_spi2", "dout_sclk_spi2",
322 ENABLE_SCLK_TOP0_PERIC2, 20, CLK_SET_RATE_PARENT, 0),
240 GATE(CLK_SCLK_UART3, "sclk_uart3", "dout_sclk_uart3", 323 GATE(CLK_SCLK_UART3, "sclk_uart3", "dout_sclk_uart3",
241 ENABLE_SCLK_TOP0_PERIC3, 4, 0, 0), 324 ENABLE_SCLK_TOP0_PERIC3, 4, 0, 0),
242 GATE(CLK_SCLK_UART2, "sclk_uart2", "dout_sclk_uart2", 325 GATE(CLK_SCLK_UART2, "sclk_uart2", "dout_sclk_uart2",
@@ -245,6 +328,8 @@ static struct samsung_gate_clock top0_gate_clks[] __initdata = {
245 ENABLE_SCLK_TOP0_PERIC3, 12, 0, 0), 328 ENABLE_SCLK_TOP0_PERIC3, 12, 0, 0),
246 GATE(CLK_SCLK_UART0, "sclk_uart0", "dout_sclk_uart0", 329 GATE(CLK_SCLK_UART0, "sclk_uart0", "dout_sclk_uart0",
247 ENABLE_SCLK_TOP0_PERIC3, 16, 0, 0), 330 ENABLE_SCLK_TOP0_PERIC3, 16, 0, 0),
331 GATE(CLK_SCLK_SPI4, "sclk_spi4", "dout_sclk_spi4",
332 ENABLE_SCLK_TOP0_PERIC3, 20, CLK_SET_RATE_PARENT, 0),
248}; 333};
249 334
250static struct samsung_fixed_factor_clock top0_fixed_factor_clks[] __initdata = { 335static struct samsung_fixed_factor_clock top0_fixed_factor_clks[] __initdata = {
@@ -343,6 +428,8 @@ static struct samsung_mux_clock top1_mux_clks[] __initdata = {
343 MUX(0, "mout_aclk_fsys0_200", mout_top1_group1, MUX_SEL_TOP13, 28, 2), 428 MUX(0, "mout_aclk_fsys0_200", mout_top1_group1, MUX_SEL_TOP13, 28, 2),
344 429
345 MUX(0, "mout_sclk_mmc2", mout_top1_group1, MUX_SEL_TOP1_FSYS0, 24, 2), 430 MUX(0, "mout_sclk_mmc2", mout_top1_group1, MUX_SEL_TOP1_FSYS0, 24, 2),
431 MUX(0, "mout_sclk_usbdrd300", mout_top1_group1,
432 MUX_SEL_TOP1_FSYS0, 28, 2),
346 433
347 MUX(0, "mout_sclk_mmc1", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 24, 2), 434 MUX(0, "mout_sclk_mmc1", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 24, 2),
348 MUX(0, "mout_sclk_mmc0", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 28, 2), 435 MUX(0, "mout_sclk_mmc0", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 28, 2),
@@ -356,6 +443,8 @@ static struct samsung_div_clock top1_div_clks[] __initdata = {
356 443
357 DIV(DOUT_SCLK_MMC2, "dout_sclk_mmc2", "mout_sclk_mmc2", 444 DIV(DOUT_SCLK_MMC2, "dout_sclk_mmc2", "mout_sclk_mmc2",
358 DIV_TOP1_FSYS0, 24, 4), 445 DIV_TOP1_FSYS0, 24, 4),
446 DIV(0, "dout_sclk_usbdrd300", "mout_sclk_usbdrd300",
447 DIV_TOP1_FSYS0, 28, 4),
359 448
360 DIV(DOUT_SCLK_MMC1, "dout_sclk_mmc1", "mout_sclk_mmc1", 449 DIV(DOUT_SCLK_MMC1, "dout_sclk_mmc1", "mout_sclk_mmc1",
361 DIV_TOP1_FSYS1, 24, 4), 450 DIV_TOP1_FSYS1, 24, 4),
@@ -366,6 +455,8 @@ static struct samsung_div_clock top1_div_clks[] __initdata = {
366static struct samsung_gate_clock top1_gate_clks[] __initdata = { 455static struct samsung_gate_clock top1_gate_clks[] __initdata = {
367 GATE(CLK_SCLK_MMC2, "sclk_mmc2", "dout_sclk_mmc2", 456 GATE(CLK_SCLK_MMC2, "sclk_mmc2", "dout_sclk_mmc2",
368 ENABLE_SCLK_TOP1_FSYS0, 24, CLK_SET_RATE_PARENT, 0), 457 ENABLE_SCLK_TOP1_FSYS0, 24, CLK_SET_RATE_PARENT, 0),
458 GATE(0, "sclk_usbdrd300", "dout_sclk_usbdrd300",
459 ENABLE_SCLK_TOP1_FSYS0, 28, 0, 0),
369 460
370 GATE(CLK_SCLK_MMC1, "sclk_mmc1", "dout_sclk_mmc1", 461 GATE(CLK_SCLK_MMC1, "sclk_mmc1", "dout_sclk_mmc1",
371 ENABLE_SCLK_TOP1_FSYS1, 24, CLK_SET_RATE_PARENT, 0), 462 ENABLE_SCLK_TOP1_FSYS1, 24, CLK_SET_RATE_PARENT, 0),
@@ -514,6 +605,7 @@ static void __init exynos7_clk_peric0_init(struct device_node *np)
514/* Register Offset definitions for CMU_PERIC1 (0x14C80000) */ 605/* Register Offset definitions for CMU_PERIC1 (0x14C80000) */
515#define MUX_SEL_PERIC10 0x0200 606#define MUX_SEL_PERIC10 0x0200
516#define MUX_SEL_PERIC11 0x0204 607#define MUX_SEL_PERIC11 0x0204
608#define MUX_SEL_PERIC12 0x0208
517#define ENABLE_PCLK_PERIC1 0x0900 609#define ENABLE_PCLK_PERIC1 0x0900
518#define ENABLE_SCLK_PERIC10 0x0A00 610#define ENABLE_SCLK_PERIC10 0x0A00
519 611
@@ -525,10 +617,16 @@ PNAME(mout_aclk_peric1_66_p) = { "fin_pll", "dout_aclk_peric1_66" };
525PNAME(mout_sclk_uart1_p) = { "fin_pll", "sclk_uart1" }; 617PNAME(mout_sclk_uart1_p) = { "fin_pll", "sclk_uart1" };
526PNAME(mout_sclk_uart2_p) = { "fin_pll", "sclk_uart2" }; 618PNAME(mout_sclk_uart2_p) = { "fin_pll", "sclk_uart2" };
527PNAME(mout_sclk_uart3_p) = { "fin_pll", "sclk_uart3" }; 619PNAME(mout_sclk_uart3_p) = { "fin_pll", "sclk_uart3" };
620PNAME(mout_sclk_spi0_p) = { "fin_pll", "sclk_spi0" };
621PNAME(mout_sclk_spi1_p) = { "fin_pll", "sclk_spi1" };
622PNAME(mout_sclk_spi2_p) = { "fin_pll", "sclk_spi2" };
623PNAME(mout_sclk_spi3_p) = { "fin_pll", "sclk_spi3" };
624PNAME(mout_sclk_spi4_p) = { "fin_pll", "sclk_spi4" };
528 625
529static unsigned long peric1_clk_regs[] __initdata = { 626static unsigned long peric1_clk_regs[] __initdata = {
530 MUX_SEL_PERIC10, 627 MUX_SEL_PERIC10,
531 MUX_SEL_PERIC11, 628 MUX_SEL_PERIC11,
629 MUX_SEL_PERIC12,
532 ENABLE_PCLK_PERIC1, 630 ENABLE_PCLK_PERIC1,
533 ENABLE_SCLK_PERIC10, 631 ENABLE_SCLK_PERIC10,
534}; 632};
@@ -537,6 +635,16 @@ static struct samsung_mux_clock peric1_mux_clks[] __initdata = {
537 MUX(0, "mout_aclk_peric1_66_user", mout_aclk_peric1_66_p, 635 MUX(0, "mout_aclk_peric1_66_user", mout_aclk_peric1_66_p,
538 MUX_SEL_PERIC10, 0, 1), 636 MUX_SEL_PERIC10, 0, 1),
539 637
638 MUX_F(0, "mout_sclk_spi0_user", mout_sclk_spi0_p,
639 MUX_SEL_PERIC11, 0, 1, CLK_SET_RATE_PARENT, 0),
640 MUX_F(0, "mout_sclk_spi1_user", mout_sclk_spi1_p,
641 MUX_SEL_PERIC11, 4, 1, CLK_SET_RATE_PARENT, 0),
642 MUX_F(0, "mout_sclk_spi2_user", mout_sclk_spi2_p,
643 MUX_SEL_PERIC11, 8, 1, CLK_SET_RATE_PARENT, 0),
644 MUX_F(0, "mout_sclk_spi3_user", mout_sclk_spi3_p,
645 MUX_SEL_PERIC11, 12, 1, CLK_SET_RATE_PARENT, 0),
646 MUX_F(0, "mout_sclk_spi4_user", mout_sclk_spi4_p,
647 MUX_SEL_PERIC11, 16, 1, CLK_SET_RATE_PARENT, 0),
540 MUX(0, "mout_sclk_uart1_user", mout_sclk_uart1_p, 648 MUX(0, "mout_sclk_uart1_user", mout_sclk_uart1_p,
541 MUX_SEL_PERIC11, 20, 1), 649 MUX_SEL_PERIC11, 20, 1),
542 MUX(0, "mout_sclk_uart2_user", mout_sclk_uart2_p, 650 MUX(0, "mout_sclk_uart2_user", mout_sclk_uart2_p,
@@ -562,6 +670,22 @@ static struct samsung_gate_clock peric1_gate_clks[] __initdata = {
562 ENABLE_PCLK_PERIC1, 10, 0, 0), 670 ENABLE_PCLK_PERIC1, 10, 0, 0),
563 GATE(PCLK_UART3, "pclk_uart3", "mout_aclk_peric1_66_user", 671 GATE(PCLK_UART3, "pclk_uart3", "mout_aclk_peric1_66_user",
564 ENABLE_PCLK_PERIC1, 11, 0, 0), 672 ENABLE_PCLK_PERIC1, 11, 0, 0),
673 GATE(PCLK_SPI0, "pclk_spi0", "mout_aclk_peric1_66_user",
674 ENABLE_PCLK_PERIC1, 12, 0, 0),
675 GATE(PCLK_SPI1, "pclk_spi1", "mout_aclk_peric1_66_user",
676 ENABLE_PCLK_PERIC1, 13, 0, 0),
677 GATE(PCLK_SPI2, "pclk_spi2", "mout_aclk_peric1_66_user",
678 ENABLE_PCLK_PERIC1, 14, 0, 0),
679 GATE(PCLK_SPI3, "pclk_spi3", "mout_aclk_peric1_66_user",
680 ENABLE_PCLK_PERIC1, 15, 0, 0),
681 GATE(PCLK_SPI4, "pclk_spi4", "mout_aclk_peric1_66_user",
682 ENABLE_PCLK_PERIC1, 16, 0, 0),
683 GATE(PCLK_I2S1, "pclk_i2s1", "mout_aclk_peric1_66_user",
684 ENABLE_PCLK_PERIC1, 17, CLK_SET_RATE_PARENT, 0),
685 GATE(PCLK_PCM1, "pclk_pcm1", "mout_aclk_peric1_66_user",
686 ENABLE_PCLK_PERIC1, 18, 0, 0),
687 GATE(PCLK_SPDIF, "pclk_spdif", "mout_aclk_peric1_66_user",
688 ENABLE_PCLK_PERIC1, 19, 0, 0),
565 689
566 GATE(SCLK_UART1, "sclk_uart1_user", "mout_sclk_uart1_user", 690 GATE(SCLK_UART1, "sclk_uart1_user", "mout_sclk_uart1_user",
567 ENABLE_SCLK_PERIC10, 9, 0, 0), 691 ENABLE_SCLK_PERIC10, 9, 0, 0),
@@ -569,6 +693,22 @@ static struct samsung_gate_clock peric1_gate_clks[] __initdata = {
569 ENABLE_SCLK_PERIC10, 10, 0, 0), 693 ENABLE_SCLK_PERIC10, 10, 0, 0),
570 GATE(SCLK_UART3, "sclk_uart3_user", "mout_sclk_uart3_user", 694 GATE(SCLK_UART3, "sclk_uart3_user", "mout_sclk_uart3_user",
571 ENABLE_SCLK_PERIC10, 11, 0, 0), 695 ENABLE_SCLK_PERIC10, 11, 0, 0),
696 GATE(SCLK_SPI0, "sclk_spi0_user", "mout_sclk_spi0_user",
697 ENABLE_SCLK_PERIC10, 12, CLK_SET_RATE_PARENT, 0),
698 GATE(SCLK_SPI1, "sclk_spi1_user", "mout_sclk_spi1_user",
699 ENABLE_SCLK_PERIC10, 13, CLK_SET_RATE_PARENT, 0),
700 GATE(SCLK_SPI2, "sclk_spi2_user", "mout_sclk_spi2_user",
701 ENABLE_SCLK_PERIC10, 14, CLK_SET_RATE_PARENT, 0),
702 GATE(SCLK_SPI3, "sclk_spi3_user", "mout_sclk_spi3_user",
703 ENABLE_SCLK_PERIC10, 15, CLK_SET_RATE_PARENT, 0),
704 GATE(SCLK_SPI4, "sclk_spi4_user", "mout_sclk_spi4_user",
705 ENABLE_SCLK_PERIC10, 16, CLK_SET_RATE_PARENT, 0),
706 GATE(SCLK_I2S1, "sclk_i2s1_user", "sclk_i2s1",
707 ENABLE_SCLK_PERIC10, 17, CLK_SET_RATE_PARENT, 0),
708 GATE(SCLK_PCM1, "sclk_pcm1_user", "sclk_pcm1",
709 ENABLE_SCLK_PERIC10, 18, CLK_SET_RATE_PARENT, 0),
710 GATE(SCLK_SPDIF, "sclk_spdif_user", "sclk_spdif",
711 ENABLE_SCLK_PERIC10, 19, CLK_SET_RATE_PARENT, 0),
572}; 712};
573 713
574static struct samsung_cmu_info peric1_cmu_info __initdata = { 714static struct samsung_cmu_info peric1_cmu_info __initdata = {
@@ -647,7 +787,12 @@ CLK_OF_DECLARE(exynos7_clk_peris, "samsung,exynos7-clock-peris",
647/* Register Offset definitions for CMU_FSYS0 (0x10E90000) */ 787/* Register Offset definitions for CMU_FSYS0 (0x10E90000) */
648#define MUX_SEL_FSYS00 0x0200 788#define MUX_SEL_FSYS00 0x0200
649#define MUX_SEL_FSYS01 0x0204 789#define MUX_SEL_FSYS01 0x0204
790#define MUX_SEL_FSYS02 0x0208
791#define ENABLE_ACLK_FSYS00 0x0800
650#define ENABLE_ACLK_FSYS01 0x0804 792#define ENABLE_ACLK_FSYS01 0x0804
793#define ENABLE_SCLK_FSYS01 0x0A04
794#define ENABLE_SCLK_FSYS02 0x0A08
795#define ENABLE_SCLK_FSYS04 0x0A10
651 796
652/* 797/*
653 * List of parent clocks for Muxes in CMU_FSYS0 798 * List of parent clocks for Muxes in CMU_FSYS0
@@ -655,10 +800,29 @@ CLK_OF_DECLARE(exynos7_clk_peris, "samsung,exynos7-clock-peris",
655PNAME(mout_aclk_fsys0_200_p) = { "fin_pll", "dout_aclk_fsys0_200" }; 800PNAME(mout_aclk_fsys0_200_p) = { "fin_pll", "dout_aclk_fsys0_200" };
656PNAME(mout_sclk_mmc2_p) = { "fin_pll", "sclk_mmc2" }; 801PNAME(mout_sclk_mmc2_p) = { "fin_pll", "sclk_mmc2" };
657 802
803PNAME(mout_sclk_usbdrd300_p) = { "fin_pll", "sclk_usbdrd300" };
804PNAME(mout_phyclk_usbdrd300_udrd30_phyclk_p) = { "fin_pll",
805 "phyclk_usbdrd300_udrd30_phyclock" };
806PNAME(mout_phyclk_usbdrd300_udrd30_pipe_pclk_p) = { "fin_pll",
807 "phyclk_usbdrd300_udrd30_pipe_pclk" };
808
809/* fixed rate clocks used in the FSYS0 block */
810static struct samsung_fixed_rate_clock fixed_rate_clks_fsys0[] __initdata = {
811 FRATE(0, "phyclk_usbdrd300_udrd30_phyclock", NULL,
812 CLK_IS_ROOT, 60000000),
813 FRATE(0, "phyclk_usbdrd300_udrd30_pipe_pclk", NULL,
814 CLK_IS_ROOT, 125000000),
815};
816
658static unsigned long fsys0_clk_regs[] __initdata = { 817static unsigned long fsys0_clk_regs[] __initdata = {
659 MUX_SEL_FSYS00, 818 MUX_SEL_FSYS00,
660 MUX_SEL_FSYS01, 819 MUX_SEL_FSYS01,
820 MUX_SEL_FSYS02,
821 ENABLE_ACLK_FSYS00,
661 ENABLE_ACLK_FSYS01, 822 ENABLE_ACLK_FSYS01,
823 ENABLE_SCLK_FSYS01,
824 ENABLE_SCLK_FSYS02,
825 ENABLE_SCLK_FSYS04,
662}; 826};
663 827
664static struct samsung_mux_clock fsys0_mux_clks[] __initdata = { 828static struct samsung_mux_clock fsys0_mux_clks[] __initdata = {
@@ -666,11 +830,49 @@ static struct samsung_mux_clock fsys0_mux_clks[] __initdata = {
666 MUX_SEL_FSYS00, 24, 1), 830 MUX_SEL_FSYS00, 24, 1),
667 831
668 MUX(0, "mout_sclk_mmc2_user", mout_sclk_mmc2_p, MUX_SEL_FSYS01, 24, 1), 832 MUX(0, "mout_sclk_mmc2_user", mout_sclk_mmc2_p, MUX_SEL_FSYS01, 24, 1),
833 MUX(0, "mout_sclk_usbdrd300_user", mout_sclk_usbdrd300_p,
834 MUX_SEL_FSYS01, 28, 1),
835
836 MUX(0, "mout_phyclk_usbdrd300_udrd30_pipe_pclk_user",
837 mout_phyclk_usbdrd300_udrd30_pipe_pclk_p,
838 MUX_SEL_FSYS02, 24, 1),
839 MUX(0, "mout_phyclk_usbdrd300_udrd30_phyclk_user",
840 mout_phyclk_usbdrd300_udrd30_phyclk_p,
841 MUX_SEL_FSYS02, 28, 1),
669}; 842};
670 843
671static struct samsung_gate_clock fsys0_gate_clks[] __initdata = { 844static struct samsung_gate_clock fsys0_gate_clks[] __initdata = {
845 GATE(ACLK_AXIUS_USBDRD30X_FSYS0X, "aclk_axius_usbdrd30x_fsys0x",
846 "mout_aclk_fsys0_200_user",
847 ENABLE_ACLK_FSYS00, 19, 0, 0),
848 GATE(ACLK_PDMA1, "aclk_pdma1", "mout_aclk_fsys0_200_user",
849 ENABLE_ACLK_FSYS00, 3, 0, 0),
850 GATE(ACLK_PDMA0, "aclk_pdma0", "mout_aclk_fsys0_200_user",
851 ENABLE_ACLK_FSYS00, 4, 0, 0),
852
853 GATE(ACLK_USBDRD300, "aclk_usbdrd300", "mout_aclk_fsys0_200_user",
854 ENABLE_ACLK_FSYS01, 29, 0, 0),
672 GATE(ACLK_MMC2, "aclk_mmc2", "mout_aclk_fsys0_200_user", 855 GATE(ACLK_MMC2, "aclk_mmc2", "mout_aclk_fsys0_200_user",
673 ENABLE_ACLK_FSYS01, 31, 0, 0), 856 ENABLE_ACLK_FSYS01, 31, 0, 0),
857
858 GATE(SCLK_USBDRD300_SUSPENDCLK, "sclk_usbdrd300_suspendclk",
859 "mout_sclk_usbdrd300_user",
860 ENABLE_SCLK_FSYS01, 4, 0, 0),
861 GATE(SCLK_USBDRD300_REFCLK, "sclk_usbdrd300_refclk", "fin_pll",
862 ENABLE_SCLK_FSYS01, 8, 0, 0),
863
864 GATE(PHYCLK_USBDRD300_UDRD30_PIPE_PCLK_USER,
865 "phyclk_usbdrd300_udrd30_pipe_pclk_user",
866 "mout_phyclk_usbdrd300_udrd30_pipe_pclk_user",
867 ENABLE_SCLK_FSYS02, 24, 0, 0),
868 GATE(PHYCLK_USBDRD300_UDRD30_PHYCLK_USER,
869 "phyclk_usbdrd300_udrd30_phyclk_user",
870 "mout_phyclk_usbdrd300_udrd30_phyclk_user",
871 ENABLE_SCLK_FSYS02, 28, 0, 0),
872
873 GATE(OSCCLK_PHY_CLKOUT_USB30_PHY, "oscclk_phy_clkout_usb30_phy",
874 "fin_pll",
875 ENABLE_SCLK_FSYS04, 28, 0, 0),
674}; 876};
675 877
676static struct samsung_cmu_info fsys0_cmu_info __initdata = { 878static struct samsung_cmu_info fsys0_cmu_info __initdata = {
@@ -741,3 +943,205 @@ static void __init exynos7_clk_fsys1_init(struct device_node *np)
741 943
742CLK_OF_DECLARE(exynos7_clk_fsys1, "samsung,exynos7-clock-fsys1", 944CLK_OF_DECLARE(exynos7_clk_fsys1, "samsung,exynos7-clock-fsys1",
743 exynos7_clk_fsys1_init); 945 exynos7_clk_fsys1_init);
946
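/* Register Offset definitions for CMU_MSCL */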
947#define MUX_SEL_MSCL 0x0200
948#define DIV_MSCL 0x0600
949#define ENABLE_ACLK_MSCL 0x0800
950#define ENABLE_PCLK_MSCL 0x0900
951
952/* List of parent clocks for Muxes in CMU_MSCL */
953PNAME(mout_aclk_mscl_532_user_p) = { "fin_pll", "aclk_mscl_532" };
954
955static unsigned long mscl_clk_regs[] __initdata = {
956 MUX_SEL_MSCL,
957 DIV_MSCL,
958 ENABLE_ACLK_MSCL,
959 ENABLE_PCLK_MSCL,
960};
961
962static struct samsung_mux_clock mscl_mux_clks[] __initdata = {
963 MUX(USERMUX_ACLK_MSCL_532, "usermux_aclk_mscl_532",
964 mout_aclk_mscl_532_user_p, MUX_SEL_MSCL, 0, 1),
965};
966static struct samsung_div_clock mscl_div_clks[] __initdata = {
967 DIV(DOUT_PCLK_MSCL, "dout_pclk_mscl", "usermux_aclk_mscl_532",
968 DIV_MSCL, 0, 3),
969};
970static struct samsung_gate_clock mscl_gate_clks[] __initdata = {
971
972 GATE(ACLK_MSCL_0, "aclk_mscl_0", "usermux_aclk_mscl_532",
973 ENABLE_ACLK_MSCL, 31, 0, 0),
974 GATE(ACLK_MSCL_1, "aclk_mscl_1", "usermux_aclk_mscl_532",
975 ENABLE_ACLK_MSCL, 30, 0, 0),
976 GATE(ACLK_JPEG, "aclk_jpeg", "usermux_aclk_mscl_532",
977 ENABLE_ACLK_MSCL, 29, 0, 0),
978 GATE(ACLK_G2D, "aclk_g2d", "usermux_aclk_mscl_532",
979 ENABLE_ACLK_MSCL, 28, 0, 0),
980 GATE(ACLK_LH_ASYNC_SI_MSCL_0, "aclk_lh_async_si_mscl_0",
981 "usermux_aclk_mscl_532",
982 ENABLE_ACLK_MSCL, 27, 0, 0),
983 GATE(ACLK_LH_ASYNC_SI_MSCL_1, "aclk_lh_async_si_mscl_1",
984 "usermux_aclk_mscl_532",
985 ENABLE_ACLK_MSCL, 26, 0, 0),
986 GATE(ACLK_XIU_MSCLX_0, "aclk_xiu_msclx_0", "usermux_aclk_mscl_532",
987 ENABLE_ACLK_MSCL, 25, 0, 0),
988 GATE(ACLK_XIU_MSCLX_1, "aclk_xiu_msclx_1", "usermux_aclk_mscl_532",
989 ENABLE_ACLK_MSCL, 24, 0, 0),
990 GATE(ACLK_AXI2ACEL_BRIDGE, "aclk_axi2acel_bridge",
991 "usermux_aclk_mscl_532",
992 ENABLE_ACLK_MSCL, 23, 0, 0),
993 GATE(ACLK_QE_MSCL_0, "aclk_qe_mscl_0", "usermux_aclk_mscl_532",
994 ENABLE_ACLK_MSCL, 22, 0, 0),
995 GATE(ACLK_QE_MSCL_1, "aclk_qe_mscl_1", "usermux_aclk_mscl_532",
996 ENABLE_ACLK_MSCL, 21, 0, 0),
997 GATE(ACLK_QE_JPEG, "aclk_qe_jpeg", "usermux_aclk_mscl_532",
998 ENABLE_ACLK_MSCL, 20, 0, 0),
999 GATE(ACLK_QE_G2D, "aclk_qe_g2d", "usermux_aclk_mscl_532",
1000 ENABLE_ACLK_MSCL, 19, 0, 0),
1001 GATE(ACLK_PPMU_MSCL_0, "aclk_ppmu_mscl_0", "usermux_aclk_mscl_532",
1002 ENABLE_ACLK_MSCL, 18, 0, 0),
1003 GATE(ACLK_PPMU_MSCL_1, "aclk_ppmu_mscl_1", "usermux_aclk_mscl_532",
1004 ENABLE_ACLK_MSCL, 17, 0, 0),
1005 GATE(ACLK_MSCLNP_133, "aclk_msclnp_133", "usermux_aclk_mscl_532",
1006 ENABLE_ACLK_MSCL, 16, 0, 0),
1007 GATE(ACLK_AHB2APB_MSCL0P, "aclk_ahb2apb_mscl0p",
1008 "usermux_aclk_mscl_532",
1009 ENABLE_ACLK_MSCL, 15, 0, 0),
1010 GATE(ACLK_AHB2APB_MSCL1P, "aclk_ahb2apb_mscl1p",
1011 "usermux_aclk_mscl_532",
1012 ENABLE_ACLK_MSCL, 14, 0, 0),
1013
1014 GATE(PCLK_MSCL_0, "pclk_mscl_0", "dout_pclk_mscl",
1015 ENABLE_PCLK_MSCL, 31, 0, 0),
1016 GATE(PCLK_MSCL_1, "pclk_mscl_1", "dout_pclk_mscl",
1017 ENABLE_PCLK_MSCL, 30, 0, 0),
1018 GATE(PCLK_JPEG, "pclk_jpeg", "dout_pclk_mscl",
1019 ENABLE_PCLK_MSCL, 29, 0, 0),
1020 GATE(PCLK_G2D, "pclk_g2d", "dout_pclk_mscl",
1021 ENABLE_PCLK_MSCL, 28, 0, 0),
1022 GATE(PCLK_QE_MSCL_0, "pclk_qe_mscl_0", "dout_pclk_mscl",
1023 ENABLE_PCLK_MSCL, 27, 0, 0),
1024 GATE(PCLK_QE_MSCL_1, "pclk_qe_mscl_1", "dout_pclk_mscl",
1025 ENABLE_PCLK_MSCL, 26, 0, 0),
1026 GATE(PCLK_QE_JPEG, "pclk_qe_jpeg", "dout_pclk_mscl",
1027 ENABLE_PCLK_MSCL, 25, 0, 0),
1028 GATE(PCLK_QE_G2D, "pclk_qe_g2d", "dout_pclk_mscl",
1029 ENABLE_PCLK_MSCL, 24, 0, 0),
1030 GATE(PCLK_PPMU_MSCL_0, "pclk_ppmu_mscl_0", "dout_pclk_mscl",
1031 ENABLE_PCLK_MSCL, 23, 0, 0),
1032 GATE(PCLK_PPMU_MSCL_1, "pclk_ppmu_mscl_1", "dout_pclk_mscl",
1033 ENABLE_PCLK_MSCL, 22, 0, 0),
1034 GATE(PCLK_AXI2ACEL_BRIDGE, "pclk_axi2acel_bridge", "dout_pclk_mscl",
1035 ENABLE_PCLK_MSCL, 21, 0, 0),
1036 GATE(PCLK_PMU_MSCL, "pclk_pmu_mscl", "dout_pclk_mscl",
1037 ENABLE_PCLK_MSCL, 20, 0, 0),
1038};
1039
1040static struct samsung_cmu_info mscl_cmu_info __initdata = {
1041 .mux_clks = mscl_mux_clks,
1042 .nr_mux_clks = ARRAY_SIZE(mscl_mux_clks),
1043 .div_clks = mscl_div_clks,
1044 .nr_div_clks = ARRAY_SIZE(mscl_div_clks),
1045 .gate_clks = mscl_gate_clks,
1046 .nr_gate_clks = ARRAY_SIZE(mscl_gate_clks),
1047 .nr_clk_ids = MSCL_NR_CLK,
1048 .clk_regs = mscl_clk_regs,
1049 .nr_clk_regs = ARRAY_SIZE(mscl_clk_regs),
1050};
1051
1052static void __init exynos7_clk_mscl_init(struct device_node *np)
1053{
1054 samsung_cmu_register_one(np, &mscl_cmu_info);
1055}
1056
1057CLK_OF_DECLARE(exynos7_clk_mscl, "samsung,exynos7-clock-mscl",
1058 exynos7_clk_mscl_init);
1059
1060/* Register Offset definitions for CMU_AUD (0x114C0000) */
1061#define MUX_SEL_AUD 0x0200
1062#define DIV_AUD0 0x0600
1063#define DIV_AUD1 0x0604
1064#define ENABLE_ACLK_AUD 0x0800
1065#define ENABLE_PCLK_AUD 0x0900
1066#define ENABLE_SCLK_AUD 0x0A00
1067
1068/*
1069 * List of parent clocks for Muxes in CMU_AUD
1070 */
1071PNAME(mout_aud_pll_user_p) = { "fin_pll", "fout_aud_pll" };
1072PNAME(mout_aud_group_p) = { "dout_aud_cdclk", "ioclk_audiocdclk0" };
1073
1074static unsigned long aud_clk_regs[] __initdata = {
1075 MUX_SEL_AUD,
1076 DIV_AUD0,
1077 DIV_AUD1,
1078 ENABLE_ACLK_AUD,
1079 ENABLE_PCLK_AUD,
1080 ENABLE_SCLK_AUD,
1081};
1082
1083static struct samsung_mux_clock aud_mux_clks[] __initdata = {
1084 MUX(0, "mout_sclk_i2s", mout_aud_group_p, MUX_SEL_AUD, 12, 1),
1085 MUX(0, "mout_sclk_pcm", mout_aud_group_p, MUX_SEL_AUD, 16, 1),
1086 MUX(0, "mout_aud_pll_user", mout_aud_pll_user_p, MUX_SEL_AUD, 20, 1),
1087};
1088
1089static struct samsung_div_clock aud_div_clks[] __initdata = {
1090 DIV(0, "dout_aud_ca5", "mout_aud_pll_user", DIV_AUD0, 0, 4),
1091 DIV(0, "dout_aclk_aud", "dout_aud_ca5", DIV_AUD0, 4, 4),
1092 DIV(0, "dout_aud_pclk_dbg", "dout_aud_ca5", DIV_AUD0, 8, 4),
1093
1094 DIV(0, "dout_sclk_i2s", "mout_sclk_i2s", DIV_AUD1, 0, 4),
1095 DIV(0, "dout_sclk_pcm", "mout_sclk_pcm", DIV_AUD1, 4, 8),
1096 DIV(0, "dout_sclk_uart", "dout_aud_cdclk", DIV_AUD1, 12, 4),
1097 DIV(0, "dout_sclk_slimbus", "dout_aud_cdclk", DIV_AUD1, 16, 5),
1098 DIV(0, "dout_aud_cdclk", "mout_aud_pll_user", DIV_AUD1, 24, 4),
1099};
1100
1101static struct samsung_gate_clock aud_gate_clks[] __initdata = {
1102 GATE(SCLK_PCM, "sclk_pcm", "dout_sclk_pcm",
1103 ENABLE_SCLK_AUD, 27, CLK_SET_RATE_PARENT, 0),
1104 GATE(SCLK_I2S, "sclk_i2s", "dout_sclk_i2s",
1105 ENABLE_SCLK_AUD, 28, CLK_SET_RATE_PARENT, 0),
1106 GATE(0, "sclk_uart", "dout_sclk_uart", ENABLE_SCLK_AUD, 29, 0, 0),
1107 GATE(0, "sclk_slimbus", "dout_sclk_slimbus",
1108 ENABLE_SCLK_AUD, 30, 0, 0),
1109
1110 GATE(0, "pclk_dbg_aud", "dout_aud_pclk_dbg", ENABLE_PCLK_AUD, 19, 0, 0),
1111 GATE(0, "pclk_gpio_aud", "dout_aclk_aud", ENABLE_PCLK_AUD, 20, 0, 0),
1112 GATE(0, "pclk_wdt1", "dout_aclk_aud", ENABLE_PCLK_AUD, 22, 0, 0),
1113 GATE(0, "pclk_wdt0", "dout_aclk_aud", ENABLE_PCLK_AUD, 23, 0, 0),
1114 GATE(0, "pclk_slimbus", "dout_aclk_aud", ENABLE_PCLK_AUD, 24, 0, 0),
1115 GATE(0, "pclk_uart", "dout_aclk_aud", ENABLE_PCLK_AUD, 25, 0, 0),
1116 GATE(PCLK_PCM, "pclk_pcm", "dout_aclk_aud",
1117 ENABLE_PCLK_AUD, 26, CLK_SET_RATE_PARENT, 0),
1118 GATE(PCLK_I2S, "pclk_i2s", "dout_aclk_aud",
1119 ENABLE_PCLK_AUD, 27, CLK_SET_RATE_PARENT, 0),
1120 GATE(0, "pclk_timer", "dout_aclk_aud", ENABLE_PCLK_AUD, 28, 0, 0),
1121 GATE(0, "pclk_smmu_aud", "dout_aclk_aud", ENABLE_PCLK_AUD, 31, 0, 0),
1122
1123 GATE(0, "aclk_smmu_aud", "dout_aclk_aud", ENABLE_ACLK_AUD, 27, 0, 0),
1124 GATE(0, "aclk_acel_lh_async_si_top", "dout_aclk_aud",
1125 ENABLE_ACLK_AUD, 28, 0, 0),
1126 GATE(ACLK_ADMA, "aclk_dmac", "dout_aclk_aud", ENABLE_ACLK_AUD, 31, 0, 0),
1127};
1128
1129static struct samsung_cmu_info aud_cmu_info __initdata = {
1130 .mux_clks = aud_mux_clks,
1131 .nr_mux_clks = ARRAY_SIZE(aud_mux_clks),
1132 .div_clks = aud_div_clks,
1133 .nr_div_clks = ARRAY_SIZE(aud_div_clks),
1134 .gate_clks = aud_gate_clks,
1135 .nr_gate_clks = ARRAY_SIZE(aud_gate_clks),
1136 .nr_clk_ids = AUD_NR_CLK,
1137 .clk_regs = aud_clk_regs,
1138 .nr_clk_regs = ARRAY_SIZE(aud_clk_regs),
1139};
1140
1141static void __init exynos7_clk_aud_init(struct device_node *np)
1142{
1143 samsung_cmu_register_one(np, &aud_cmu_info);
1144}
1145
1146CLK_OF_DECLARE(exynos7_clk_aud, "samsung,exynos7-clock-aud",
1147 exynos7_clk_aud_init);
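Reading the CMU_AUD tables above together, the audio PLL can be divided down to standard audio rates; a sketch of one plausible path (the divider values here are illustrative, not programmed by this patch):

/*
 * fout_aud_pll (491.52 MHz, per the pll1460x table)
 *   -> mout_aud_pll_user -> dout_aud_cdclk  (/10 -> 49.152 MHz)
 *     -> mout_sclk_i2s   -> dout_sclk_i2s   (/4  -> 12.288 MHz)
 *       -> sclk_i2s
 *
 * 12.288 MHz is 256 * 48 kHz, a common I2S master clock; both divider
 * fields are 4 bits wide, so /10 and /4 fit.
 */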
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index 4bda54095a16..9e1f88c04fd4 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -374,19 +374,24 @@ static void samsung_clk_sleep_init(void __iomem *reg_base,
374 * Common function which registers plls, muxes, dividers and gates 374 * Common function which registers plls, muxes, dividers and gates
375 * for each CMU. It also adds the CMU register list to the register cache. 375 * for each CMU. It also adds the CMU register list to the register cache.
376 */ 376 */
377void __init samsung_cmu_register_one(struct device_node *np, 377struct samsung_clk_provider * __init samsung_cmu_register_one(
378 struct device_node *np,
378 struct samsung_cmu_info *cmu) 379 struct samsung_cmu_info *cmu)
379{ 380{
380 void __iomem *reg_base; 381 void __iomem *reg_base;
381 struct samsung_clk_provider *ctx; 382 struct samsung_clk_provider *ctx;
382 383
383 reg_base = of_iomap(np, 0); 384 reg_base = of_iomap(np, 0);
384 if (!reg_base) 385 if (!reg_base) {
385 panic("%s: failed to map registers\n", __func__); 386 panic("%s: failed to map registers\n", __func__);
387 return NULL;
388 }
386 389
387 ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids); 390 ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
388 if (!ctx) 391 if (!ctx) {
389 panic("%s: unable to alllocate ctx\n", __func__); 392 panic("%s: unable to alllocate ctx\n", __func__);
393 return ctx;
394 }
390 395
391 if (cmu->pll_clks) 396 if (cmu->pll_clks)
392 samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks, 397 samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
@@ -410,4 +415,6 @@ void __init samsung_cmu_register_one(struct device_node *np,
410 cmu->nr_clk_regs); 415 cmu->nr_clk_regs);
411 416
412 samsung_clk_of_add_provider(np, ctx); 417 samsung_clk_of_add_provider(np, ctx);
418
419 return ctx;
413} 420}
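A minimal caller sketch showing what the new return value enables; the CMU name and info structure below are hypothetical, not part of this patch:

static struct samsung_clk_provider *foo_ctx;

static void __init example_clk_foo_init(struct device_node *np)
{
	/* the provider handle can now be kept for later use */
	foo_ctx = samsung_cmu_register_one(np, &foo_cmu_info);
	if (!foo_ctx)
		pr_err("%s: failed to register CMU\n", __func__);
}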
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index 8acabe1f32c4..e4c75383cea7 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -392,7 +392,8 @@ extern void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
392 struct samsung_pll_clock *pll_list, 392 struct samsung_pll_clock *pll_list,
393 unsigned int nr_clk, void __iomem *base); 393 unsigned int nr_clk, void __iomem *base);
394 394
395extern void __init samsung_cmu_register_one(struct device_node *, 395extern struct samsung_clk_provider __init *samsung_cmu_register_one(
396 struct device_node *,
396 struct samsung_cmu_info *); 397 struct samsung_cmu_info *);
397 398
398extern unsigned long _get_rate(const char *clk_name); 399extern unsigned long _get_rate(const char *clk_name);
diff --git a/drivers/clk/shmobile/Makefile b/drivers/clk/shmobile/Makefile
index f83980f2b956..0689d7fb2666 100644
--- a/drivers/clk/shmobile/Makefile
+++ b/drivers/clk/shmobile/Makefile
@@ -1,9 +1,11 @@
1obj-$(CONFIG_ARCH_EMEV2) += clk-emev2.o 1obj-$(CONFIG_ARCH_EMEV2) += clk-emev2.o
2obj-$(CONFIG_ARCH_R7S72100) += clk-rz.o 2obj-$(CONFIG_ARCH_R7S72100) += clk-rz.o
3obj-$(CONFIG_ARCH_R8A73A4) += clk-r8a73a4.o
3obj-$(CONFIG_ARCH_R8A7740) += clk-r8a7740.o 4obj-$(CONFIG_ARCH_R8A7740) += clk-r8a7740.o
4obj-$(CONFIG_ARCH_R8A7779) += clk-r8a7779.o 5obj-$(CONFIG_ARCH_R8A7779) += clk-r8a7779.o
5obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o 6obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o
6obj-$(CONFIG_ARCH_R8A7791) += clk-rcar-gen2.o 7obj-$(CONFIG_ARCH_R8A7791) += clk-rcar-gen2.o
8obj-$(CONFIG_ARCH_R8A7793) += clk-rcar-gen2.o
7obj-$(CONFIG_ARCH_R8A7794) += clk-rcar-gen2.o 9obj-$(CONFIG_ARCH_R8A7794) += clk-rcar-gen2.o
8obj-$(CONFIG_ARCH_SH73A0) += clk-sh73a0.o 10obj-$(CONFIG_ARCH_SH73A0) += clk-sh73a0.o
9obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += clk-div6.o 11obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += clk-div6.o
diff --git a/drivers/clk/shmobile/clk-div6.c b/drivers/clk/shmobile/clk-div6.c
index 639241e31e03..036a692c7219 100644
--- a/drivers/clk/shmobile/clk-div6.c
+++ b/drivers/clk/shmobile/clk-div6.c
@@ -54,12 +54,19 @@ static int cpg_div6_clock_enable(struct clk_hw *hw)
54static void cpg_div6_clock_disable(struct clk_hw *hw) 54static void cpg_div6_clock_disable(struct clk_hw *hw)
55{ 55{
56 struct div6_clock *clock = to_div6_clock(hw); 56 struct div6_clock *clock = to_div6_clock(hw);
57 u32 val;
57 58
58 /* DIV6 clocks require the divisor field to be non-zero when stopping 59 val = clk_readl(clock->reg);
59 * the clock. 60 val |= CPG_DIV6_CKSTP;
61 /*
62 * DIV6 clocks require the divisor field to be non-zero when stopping
63 * the clock. However, some clocks (e.g. ZB on sh73a0) fail to be
64 * re-enabled later if the divisor field is changed when stopping the
65 * clock.
60 */ 66 */
61 clk_writel(clk_readl(clock->reg) | CPG_DIV6_CKSTP | CPG_DIV6_DIV_MASK, 67 if (!(val & CPG_DIV6_DIV_MASK))
62 clock->reg); 68 val |= CPG_DIV6_DIV_MASK;
69 clk_writel(val, clock->reg);
63} 70}
64 71
65static int cpg_div6_clock_is_enabled(struct clk_hw *hw) 72static int cpg_div6_clock_is_enabled(struct clk_hw *hw)
@@ -83,6 +90,9 @@ static unsigned int cpg_div6_clock_calc_div(unsigned long rate,
83{ 90{
84 unsigned int div; 91 unsigned int div;
85 92
93 if (!rate)
94 rate = 1;
95
86 div = DIV_ROUND_CLOSEST(parent_rate, rate); 96 div = DIV_ROUND_CLOSEST(parent_rate, rate);
87 return clamp_t(unsigned int, div, 1, 64); 97 return clamp_t(unsigned int, div, 1, 64);
88} 98}
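A worked example of the reworked disable path, assuming the driver's usual definitions (CPG_DIV6_CKSTP at bit 8, a 6-bit CPG_DIV6_DIV_MASK):

/*
 * reg = 0x00000004 (divisor field 4, clock running)
 *   -> 0x00000104   stop bit set, divisor preserved, so the clock can
 *                   be re-enabled at its previous rate
 * reg = 0x00000000 (divisor field 0)
 *   -> 0x0000013f   stop bit set, divisor forced non-zero as the
 *                   hardware requires when stopping
 */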
diff --git a/drivers/clk/shmobile/clk-r8a73a4.c b/drivers/clk/shmobile/clk-r8a73a4.c
new file mode 100644
index 000000000000..29b9a0b0012a
--- /dev/null
+++ b/drivers/clk/shmobile/clk-r8a73a4.c
@@ -0,0 +1,241 @@
1/*
2 * r8a73a4 Core CPG Clocks
3 *
4 * Copyright (C) 2014 Ulrich Hecht
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 */
10
11#include <linux/clk-provider.h>
12#include <linux/clkdev.h>
13#include <linux/clk/shmobile.h>
14#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/of.h>
17#include <linux/of_address.h>
18#include <linux/spinlock.h>
19
20struct r8a73a4_cpg {
21 struct clk_onecell_data data;
22 spinlock_t lock;
23 void __iomem *reg;
24};
25
26#define CPG_CKSCR 0xc0
27#define CPG_FRQCRA 0x00
28#define CPG_FRQCRB 0x04
29#define CPG_FRQCRC 0xe0
30#define CPG_PLL0CR 0xd8
31#define CPG_PLL1CR 0x28
32#define CPG_PLL2CR 0x2c
33#define CPG_PLL2HCR 0xe4
34#define CPG_PLL2SCR 0xf4
35
36#define CLK_ENABLE_ON_INIT BIT(0)
37
38struct div4_clk {
39 const char *name;
40 unsigned int reg;
41 unsigned int shift;
42};
43
44static struct div4_clk div4_clks[] = {
45 { "i", CPG_FRQCRA, 20 },
46 { "m3", CPG_FRQCRA, 12 },
47 { "b", CPG_FRQCRA, 8 },
48 { "m1", CPG_FRQCRA, 4 },
49 { "m2", CPG_FRQCRA, 0 },
50 { "zx", CPG_FRQCRB, 12 },
51 { "zs", CPG_FRQCRB, 8 },
52 { "hp", CPG_FRQCRB, 4 },
53 { NULL, 0, 0 },
54};
55
56static const struct clk_div_table div4_div_table[] = {
57 { 0, 2 }, { 1, 3 }, { 2, 4 }, { 3, 6 }, { 4, 8 }, { 5, 12 },
58 { 6, 16 }, { 7, 18 }, { 8, 24 }, { 10, 36 }, { 11, 48 },
59 { 12, 10 }, { 0, 0 }
60};
61
62static struct clk * __init
63r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
64 const char *name)
65{
66 const struct clk_div_table *table = NULL;
67 const char *parent_name;
68 unsigned int shift, reg;
69 unsigned int mult = 1;
70 unsigned int div = 1;
71
72
73 if (!strcmp(name, "main")) {
74 u32 ckscr = clk_readl(cpg->reg + CPG_CKSCR);
75
76 switch ((ckscr >> 28) & 3) {
77 case 0: /* extal1 */
78 parent_name = of_clk_get_parent_name(np, 0);
79 break;
80 case 1: /* extal1 / 2 */
81 parent_name = of_clk_get_parent_name(np, 0);
82 div = 2;
83 break;
84 case 2: /* extal2 */
85 parent_name = of_clk_get_parent_name(np, 1);
86 break;
87 case 3: /* extal2 / 2 */
88 parent_name = of_clk_get_parent_name(np, 1);
89 div = 2;
90 break;
91 }
92 } else if (!strcmp(name, "pll0")) {
93 /* PLL0/1 are configurable multiplier clocks. Register them as
94 * fixed factor clocks for now as there's no generic multiplier
95 * clock implementation and we currently have no need to change
96 * the multiplier value.
97 */
98 u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
99
100 parent_name = "main";
101 mult = ((value >> 24) & 0x7f) + 1;
102 if (value & BIT(20))
103 div = 2;
104 } else if (!strcmp(name, "pll1")) {
105 u32 value = clk_readl(cpg->reg + CPG_PLL1CR);
106
107 parent_name = "main";
108 /* XXX: enable bit? */
109 mult = ((value >> 24) & 0x7f) + 1;
110 if (value & BIT(7))
111 div = 2;
112 } else if (!strncmp(name, "pll2", 4)) {
113 u32 value, cr;
114
115 switch (name[4]) {
116 case '\0':
117 cr = CPG_PLL2CR;
118 break;
119 case 's':
120 cr = CPG_PLL2SCR;
121 break;
122 case 'h':
123 cr = CPG_PLL2HCR;
124 break;
125 default:
126 return ERR_PTR(-EINVAL);
127 }
128 value = clk_readl(cpg->reg + cr);
129 switch ((value >> 5) & 7) {
130 case 0:
131 parent_name = "main";
132 div = 2;
133 break;
134 case 1:
135 parent_name = "extal2";
136 div = 2;
137 break;
138 case 3:
139 parent_name = "extal2";
140 div = 4;
141 break;
142 case 4:
143 parent_name = "main";
144 break;
145 case 5:
146 parent_name = "extal2";
147 break;
148 default:
149 pr_warn("%s: unexpected parent of %s\n", __func__,
150 name);
151 return ERR_PTR(-EINVAL);
152 }
153 /* XXX: enable bit? */
154 mult = ((value >> 24) & 0x7f) + 1;
155 } else if (!strcmp(name, "z") || !strcmp(name, "z2")) {
156 u32 shift = 8;
157
158 parent_name = "pll0";
159 if (name[1] == '2') {
160 div = 2;
161 shift = 0;
162 }
163 div *= 32;
164 mult = 0x20 - ((clk_readl(cpg->reg + CPG_FRQCRC) >> shift)
165 & 0x1f);
166 } else {
167 struct div4_clk *c;
168
169 for (c = div4_clks; c->name; c++) {
170 if (!strcmp(name, c->name))
171 break;
172 }
173 if (!c->name)
174 return ERR_PTR(-EINVAL);
175
176 parent_name = "pll1";
177 table = div4_div_table;
178 reg = c->reg;
179 shift = c->shift;
180 }
181
182 if (!table) {
183 return clk_register_fixed_factor(NULL, name, parent_name, 0,
184 mult, div);
185 } else {
186 return clk_register_divider_table(NULL, name, parent_name, 0,
187 cpg->reg + reg, shift, 4, 0,
188 table, &cpg->lock);
189 }
190}
191
192static void __init r8a73a4_cpg_clocks_init(struct device_node *np)
193{
194 struct r8a73a4_cpg *cpg;
195 struct clk **clks;
196 unsigned int i;
197 int num_clks;
198
199 num_clks = of_property_count_strings(np, "clock-output-names");
200 if (num_clks < 0) {
201 pr_err("%s: failed to count clocks\n", __func__);
202 return;
203 }
204
205 cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
206 clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
207 if (cpg == NULL || clks == NULL) {
208 /* We're leaking memory on purpose; there's no point in cleaning
209 * up as the system won't boot anyway.
210 */
211 return;
212 }
213
214 spin_lock_init(&cpg->lock);
215
216 cpg->data.clks = clks;
217 cpg->data.clk_num = num_clks;
218
219 cpg->reg = of_iomap(np, 0);
220 if (WARN_ON(cpg->reg == NULL))
221 return;
222
223 for (i = 0; i < num_clks; ++i) {
224 const char *name;
225 struct clk *clk;
226
227 of_property_read_string_index(np, "clock-output-names", i,
228 &name);
229
230 clk = r8a73a4_cpg_register_clock(np, cpg, name);
231 if (IS_ERR(clk))
232 pr_err("%s: failed to register %s %s clock (%ld)\n",
233 __func__, np->name, name, PTR_ERR(clk));
234 else
235 cpg->data.clks[i] = clk;
236 }
237
238 of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
239}
240CLK_OF_DECLARE(r8a73a4_cpg_clks, "renesas,r8a73a4-cpg-clocks",
241 r8a73a4_cpg_clocks_init);
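To make the "z"/"z2" factor computation concrete (a worked reading of the code above):

/*
 * z:  mult = 0x20 - ((FRQCRC >> 8) & 0x1f), div = 32
 *     e.g. a field value of 0x08 gives mult = 24, so z = pll0 * 24/32
 * z2: same 5-bit field at shift 0, with an extra /2 (div = 64)
 *     e.g. a field value of 0x00 gives mult = 32, so z2 = pll0 / 2
 */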
diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c
index e996425d06a9..acfb6d7dbd6b 100644
--- a/drivers/clk/shmobile/clk-rcar-gen2.c
+++ b/drivers/clk/shmobile/clk-rcar-gen2.c
@@ -33,6 +33,8 @@ struct rcar_gen2_cpg {
33#define CPG_FRQCRC 0x000000e0 33#define CPG_FRQCRC 0x000000e0
34#define CPG_FRQCRC_ZFC_MASK (0x1f << 8) 34#define CPG_FRQCRC_ZFC_MASK (0x1f << 8)
35#define CPG_FRQCRC_ZFC_SHIFT 8 35#define CPG_FRQCRC_ZFC_SHIFT 8
36#define CPG_ADSPCKCR 0x0000025c
37#define CPG_RCANCKCR 0x00000270
36 38
37/* ----------------------------------------------------------------------------- 39/* -----------------------------------------------------------------------------
38 * Z Clock 40 * Z Clock
@@ -161,6 +163,88 @@ static struct clk * __init cpg_z_clk_register(struct rcar_gen2_cpg *cpg)
161 return clk; 163 return clk;
162} 164}
163 165
166static struct clk * __init cpg_rcan_clk_register(struct rcar_gen2_cpg *cpg,
167 struct device_node *np)
168{
169 const char *parent_name = of_clk_get_parent_name(np, 1);
170 struct clk_fixed_factor *fixed;
171 struct clk_gate *gate;
172 struct clk *clk;
173
174 fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
175 if (!fixed)
176 return ERR_PTR(-ENOMEM);
177
178 fixed->mult = 1;
179 fixed->div = 6;
180
181 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
182 if (!gate) {
183 kfree(fixed);
184 return ERR_PTR(-ENOMEM);
185 }
186
187 gate->reg = cpg->reg + CPG_RCANCKCR;
188 gate->bit_idx = 8;
189 gate->flags = CLK_GATE_SET_TO_DISABLE;
190 gate->lock = &cpg->lock;
191
192 clk = clk_register_composite(NULL, "rcan", &parent_name, 1, NULL, NULL,
193 &fixed->hw, &clk_fixed_factor_ops,
194 &gate->hw, &clk_gate_ops, 0);
195 if (IS_ERR(clk)) {
196 kfree(gate);
197 kfree(fixed);
198 }
199
200 return clk;
201}
202
203/* ADSP divisors */
204static const struct clk_div_table cpg_adsp_div_table[] = {
205 { 1, 3 }, { 2, 4 }, { 3, 6 }, { 4, 8 },
206 { 5, 12 }, { 6, 16 }, { 7, 18 }, { 8, 24 },
207 { 10, 36 }, { 11, 48 }, { 0, 0 },
208};
209
210static struct clk * __init cpg_adsp_clk_register(struct rcar_gen2_cpg *cpg)
211{
212 const char *parent_name = "pll1";
213 struct clk_divider *div;
214 struct clk_gate *gate;
215 struct clk *clk;
216
217 div = kzalloc(sizeof(*div), GFP_KERNEL);
218 if (!div)
219 return ERR_PTR(-ENOMEM);
220
221 div->reg = cpg->reg + CPG_ADSPCKCR;
222 div->width = 4;
223 div->table = cpg_adsp_div_table;
224 div->lock = &cpg->lock;
225
226 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
227 if (!gate) {
228 kfree(div);
229 return ERR_PTR(-ENOMEM);
230 }
231
232 gate->reg = cpg->reg + CPG_ADSPCKCR;
233 gate->bit_idx = 8;
234 gate->flags = CLK_GATE_SET_TO_DISABLE;
235 gate->lock = &cpg->lock;
236
237 clk = clk_register_composite(NULL, "adsp", &parent_name, 1, NULL, NULL,
238 &div->hw, &clk_divider_ops,
239 &gate->hw, &clk_gate_ops, 0);
240 if (IS_ERR(clk)) {
241 kfree(gate);
242 kfree(div);
243 }
244
245 return clk;
246}
247
164/* ----------------------------------------------------------------------------- 248/* -----------------------------------------------------------------------------
165 * CPG Clock Data 249 * CPG Clock Data
166 */ 250 */
@@ -263,6 +347,10 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
263 shift = 0; 347 shift = 0;
264 } else if (!strcmp(name, "z")) { 348 } else if (!strcmp(name, "z")) {
265 return cpg_z_clk_register(cpg); 349 return cpg_z_clk_register(cpg);
350 } else if (!strcmp(name, "rcan")) {
351 return cpg_rcan_clk_register(cpg, np);
352 } else if (!strcmp(name, "adsp")) {
353 return cpg_adsp_clk_register(cpg);
266 } else { 354 } else {
267 return ERR_PTR(-EINVAL); 355 return ERR_PTR(-EINVAL);
268 } 356 }
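For scale, assuming the 48 MHz USB_EXTAL commonly used on R-Car Gen2 boards (the rcan parent comes from the node's second clock entry):

/*
 * rcan: fixed /6 of its parent, gated by RCANCKCR bit 8; the bit is a
 *       stop bit, hence CLK_GATE_SET_TO_DISABLE. 48 MHz / 6 = 8 MHz.
 * adsp: pll1 through the 4-bit divider table, e.g. a register value
 *       of 5 selects /12.
 */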
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 2282cef9f2ff..bf12a25eb3a2 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -37,8 +37,8 @@ static int flexgen_enable(struct clk_hw *hw)
37 struct clk_hw *pgate_hw = &flexgen->pgate.hw; 37 struct clk_hw *pgate_hw = &flexgen->pgate.hw;
38 struct clk_hw *fgate_hw = &flexgen->fgate.hw; 38 struct clk_hw *fgate_hw = &flexgen->fgate.hw;
39 39
40 pgate_hw->clk = hw->clk; 40 __clk_hw_set_clk(pgate_hw, hw);
41 fgate_hw->clk = hw->clk; 41 __clk_hw_set_clk(fgate_hw, hw);
42 42
43 clk_gate_ops.enable(pgate_hw); 43 clk_gate_ops.enable(pgate_hw);
44 44
@@ -54,7 +54,7 @@ static void flexgen_disable(struct clk_hw *hw)
54 struct clk_hw *fgate_hw = &flexgen->fgate.hw; 54 struct clk_hw *fgate_hw = &flexgen->fgate.hw;
55 55
56 /* disable only the final gate */ 56 /* disable only the final gate */
57 fgate_hw->clk = hw->clk; 57 __clk_hw_set_clk(fgate_hw, hw);
58 58
59 clk_gate_ops.disable(fgate_hw); 59 clk_gate_ops.disable(fgate_hw);
60 60
@@ -66,7 +66,7 @@ static int flexgen_is_enabled(struct clk_hw *hw)
66 struct flexgen *flexgen = to_flexgen(hw); 66 struct flexgen *flexgen = to_flexgen(hw);
67 struct clk_hw *fgate_hw = &flexgen->fgate.hw; 67 struct clk_hw *fgate_hw = &flexgen->fgate.hw;
68 68
69 fgate_hw->clk = hw->clk; 69 __clk_hw_set_clk(fgate_hw, hw);
70 70
71 if (!clk_gate_ops.is_enabled(fgate_hw)) 71 if (!clk_gate_ops.is_enabled(fgate_hw))
72 return 0; 72 return 0;
@@ -79,7 +79,7 @@ static u8 flexgen_get_parent(struct clk_hw *hw)
79 struct flexgen *flexgen = to_flexgen(hw); 79 struct flexgen *flexgen = to_flexgen(hw);
80 struct clk_hw *mux_hw = &flexgen->mux.hw; 80 struct clk_hw *mux_hw = &flexgen->mux.hw;
81 81
82 mux_hw->clk = hw->clk; 82 __clk_hw_set_clk(mux_hw, hw);
83 83
84 return clk_mux_ops.get_parent(mux_hw); 84 return clk_mux_ops.get_parent(mux_hw);
85} 85}
@@ -89,7 +89,7 @@ static int flexgen_set_parent(struct clk_hw *hw, u8 index)
89 struct flexgen *flexgen = to_flexgen(hw); 89 struct flexgen *flexgen = to_flexgen(hw);
90 struct clk_hw *mux_hw = &flexgen->mux.hw; 90 struct clk_hw *mux_hw = &flexgen->mux.hw;
91 91
92 mux_hw->clk = hw->clk; 92 __clk_hw_set_clk(mux_hw, hw);
93 93
94 return clk_mux_ops.set_parent(mux_hw, index); 94 return clk_mux_ops.set_parent(mux_hw, index);
95} 95}
@@ -124,8 +124,8 @@ unsigned long flexgen_recalc_rate(struct clk_hw *hw,
124 struct clk_hw *fdiv_hw = &flexgen->fdiv.hw; 124 struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
125 unsigned long mid_rate; 125 unsigned long mid_rate;
126 126
127 pdiv_hw->clk = hw->clk; 127 __clk_hw_set_clk(pdiv_hw, hw);
128 fdiv_hw->clk = hw->clk; 128 __clk_hw_set_clk(fdiv_hw, hw);
129 129
130 mid_rate = clk_divider_ops.recalc_rate(pdiv_hw, parent_rate); 130 mid_rate = clk_divider_ops.recalc_rate(pdiv_hw, parent_rate);
131 131
@@ -138,16 +138,27 @@ static int flexgen_set_rate(struct clk_hw *hw, unsigned long rate,
138 struct flexgen *flexgen = to_flexgen(hw); 138 struct flexgen *flexgen = to_flexgen(hw);
139 struct clk_hw *pdiv_hw = &flexgen->pdiv.hw; 139 struct clk_hw *pdiv_hw = &flexgen->pdiv.hw;
140 struct clk_hw *fdiv_hw = &flexgen->fdiv.hw; 140 struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
141 unsigned long primary_div = 0; 141 unsigned long div = 0;
142 int ret = 0; 142 int ret = 0;
143 143
144 pdiv_hw->clk = hw->clk; 144 __clk_hw_set_clk(pdiv_hw, hw);
145 fdiv_hw->clk = hw->clk; 145 __clk_hw_set_clk(fdiv_hw, hw);
146 146
147 primary_div = clk_best_div(parent_rate, rate); 147 div = clk_best_div(parent_rate, rate);
148 148
149 clk_divider_ops.set_rate(fdiv_hw, parent_rate, parent_rate); 149 /*
150 ret = clk_divider_ops.set_rate(pdiv_hw, rate, rate * primary_div); 150 * pdiv is mainly targeted for low freq results, while fdiv
151 * should be used for div <= 64. The other way round can
152 * lead to 'duty cycle' issues.
153 */
154
155 if (div <= 64) {
156 clk_divider_ops.set_rate(pdiv_hw, parent_rate, parent_rate);
157 ret = clk_divider_ops.set_rate(fdiv_hw, rate, rate * div);
158 } else {
159 clk_divider_ops.set_rate(fdiv_hw, parent_rate, parent_rate);
160 ret = clk_divider_ops.set_rate(pdiv_hw, rate, rate * div);
161 }
151 162
152 return ret; 163 return ret;
153} 164}
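A worked pass through the new set_rate split (rates illustrative):

/*
 * parent = 600 MHz, rate = 12 MHz   -> div = 50   (<= 64)
 *   the final divider (fdiv) takes the 50; the pre-divider stays at 1
 * parent = 600 MHz, rate = 300 kHz  -> div = 2000 (> 64)
 *   the pre-divider (pdiv) takes the 2000; the final divider stays at 1
 */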
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index 79dc40b5cc68..9a15ec344a85 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -94,7 +94,7 @@ static int clkgena_divmux_enable(struct clk_hw *hw)
94 unsigned long timeout; 94 unsigned long timeout;
95 int ret = 0; 95 int ret = 0;
96 96
97 mux_hw->clk = hw->clk; 97 __clk_hw_set_clk(mux_hw, hw);
98 98
99 ret = clk_mux_ops.set_parent(mux_hw, genamux->muxsel); 99 ret = clk_mux_ops.set_parent(mux_hw, genamux->muxsel);
100 if (ret) 100 if (ret)
@@ -116,7 +116,7 @@ static void clkgena_divmux_disable(struct clk_hw *hw)
116 struct clkgena_divmux *genamux = to_clkgena_divmux(hw); 116 struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
117 struct clk_hw *mux_hw = &genamux->mux.hw; 117 struct clk_hw *mux_hw = &genamux->mux.hw;
118 118
119 mux_hw->clk = hw->clk; 119 __clk_hw_set_clk(mux_hw, hw);
120 120
121 clk_mux_ops.set_parent(mux_hw, CKGAX_CLKOPSRC_SWITCH_OFF); 121 clk_mux_ops.set_parent(mux_hw, CKGAX_CLKOPSRC_SWITCH_OFF);
122} 122}
@@ -126,7 +126,7 @@ static int clkgena_divmux_is_enabled(struct clk_hw *hw)
126 struct clkgena_divmux *genamux = to_clkgena_divmux(hw); 126 struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
127 struct clk_hw *mux_hw = &genamux->mux.hw; 127 struct clk_hw *mux_hw = &genamux->mux.hw;
128 128
129 mux_hw->clk = hw->clk; 129 __clk_hw_set_clk(mux_hw, hw);
130 130
131 return (s8)clk_mux_ops.get_parent(mux_hw) > 0; 131 return (s8)clk_mux_ops.get_parent(mux_hw) > 0;
132} 132}
@@ -136,7 +136,7 @@ u8 clkgena_divmux_get_parent(struct clk_hw *hw)
136 struct clkgena_divmux *genamux = to_clkgena_divmux(hw); 136 struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
137 struct clk_hw *mux_hw = &genamux->mux.hw; 137 struct clk_hw *mux_hw = &genamux->mux.hw;
138 138
139 mux_hw->clk = hw->clk; 139 __clk_hw_set_clk(mux_hw, hw);
140 140
141 genamux->muxsel = clk_mux_ops.get_parent(mux_hw); 141 genamux->muxsel = clk_mux_ops.get_parent(mux_hw);
142 if ((s8)genamux->muxsel < 0) { 142 if ((s8)genamux->muxsel < 0) {
@@ -174,7 +174,7 @@ unsigned long clkgena_divmux_recalc_rate(struct clk_hw *hw,
174 struct clkgena_divmux *genamux = to_clkgena_divmux(hw); 174 struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
175 struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw; 175 struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
176 176
177 div_hw->clk = hw->clk; 177 __clk_hw_set_clk(div_hw, hw);
178 178
179 return clk_divider_ops.recalc_rate(div_hw, parent_rate); 179 return clk_divider_ops.recalc_rate(div_hw, parent_rate);
180} 180}
@@ -185,7 +185,7 @@ static int clkgena_divmux_set_rate(struct clk_hw *hw, unsigned long rate,
185 struct clkgena_divmux *genamux = to_clkgena_divmux(hw); 185 struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
186 struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw; 186 struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
187 187
188 div_hw->clk = hw->clk; 188 __clk_hw_set_clk(div_hw, hw);
189 189
190 return clk_divider_ops.set_rate(div_hw, rate, parent_rate); 190 return clk_divider_ops.set_rate(div_hw, rate, parent_rate);
191} 191}
@@ -196,7 +196,7 @@ static long clkgena_divmux_round_rate(struct clk_hw *hw, unsigned long rate,
196 struct clkgena_divmux *genamux = to_clkgena_divmux(hw); 196 struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
197 struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw; 197 struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
198 198
199 div_hw->clk = hw->clk; 199 __clk_hw_set_clk(div_hw, hw);
200 200
201 return clk_divider_ops.round_rate(div_hw, rate, prate); 201 return clk_divider_ops.round_rate(div_hw, rate, prate);
202} 202}
diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile
index a66953c0f430..3a5292e3fcf8 100644
--- a/drivers/clk/sunxi/Makefile
+++ b/drivers/clk/sunxi/Makefile
@@ -8,6 +8,7 @@ obj-y += clk-a20-gmac.o
8obj-y += clk-mod0.o 8obj-y += clk-mod0.o
9obj-y += clk-sun8i-mbus.o 9obj-y += clk-sun8i-mbus.o
10obj-y += clk-sun9i-core.o 10obj-y += clk-sun9i-core.o
11obj-y += clk-sun9i-mmc.o
11 12
12obj-$(CONFIG_MFD_SUN6I_PRCM) += \ 13obj-$(CONFIG_MFD_SUN6I_PRCM) += \
13 clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \ 14 clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \
diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c
index 62e08fb58554..8c20190a3e9f 100644
--- a/drivers/clk/sunxi/clk-factors.c
+++ b/drivers/clk/sunxi/clk-factors.c
@@ -80,6 +80,8 @@ static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
80} 80}
81 81
82static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate, 82static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate,
83 unsigned long min_rate,
84 unsigned long max_rate,
83 unsigned long *best_parent_rate, 85 unsigned long *best_parent_rate,
84 struct clk_hw **best_parent_p) 86 struct clk_hw **best_parent_p)
85{ 87{
@@ -156,9 +158,10 @@ static const struct clk_ops clk_factors_ops = {
156 .set_rate = clk_factors_set_rate, 158 .set_rate = clk_factors_set_rate,
157}; 159};
158 160
159struct clk * __init sunxi_factors_register(struct device_node *node, 161struct clk *sunxi_factors_register(struct device_node *node,
160 const struct factors_data *data, 162 const struct factors_data *data,
161 spinlock_t *lock) 163 spinlock_t *lock,
164 void __iomem *reg)
162{ 165{
163 struct clk *clk; 166 struct clk *clk;
164 struct clk_factors *factors; 167 struct clk_factors *factors;
@@ -168,11 +171,8 @@ struct clk * __init sunxi_factors_register(struct device_node *node,
168 struct clk_hw *mux_hw = NULL; 171 struct clk_hw *mux_hw = NULL;
169 const char *clk_name = node->name; 172 const char *clk_name = node->name;
170 const char *parents[FACTORS_MAX_PARENTS]; 173 const char *parents[FACTORS_MAX_PARENTS];
171 void __iomem *reg;
172 int i = 0; 174 int i = 0;
173 175
174 reg = of_iomap(node, 0);
175
176 /* if we have a mux, we will have >1 parents */ 176 /* if we have a mux, we will have >1 parents */
177 while (i < FACTORS_MAX_PARENTS && 177 while (i < FACTORS_MAX_PARENTS &&
178 (parents[i] = of_clk_get_parent_name(node, i)) != NULL) 178 (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
diff --git a/drivers/clk/sunxi/clk-factors.h b/drivers/clk/sunxi/clk-factors.h
index 912238fde132..171085ab5513 100644
--- a/drivers/clk/sunxi/clk-factors.h
+++ b/drivers/clk/sunxi/clk-factors.h
@@ -36,8 +36,9 @@ struct clk_factors {
36 spinlock_t *lock; 36 spinlock_t *lock;
37}; 37};
38 38
39struct clk * __init sunxi_factors_register(struct device_node *node, 39struct clk *sunxi_factors_register(struct device_node *node,
40 const struct factors_data *data, 40 const struct factors_data *data,
41 spinlock_t *lock); 41 spinlock_t *lock,
42 void __iomem *reg);
42 43
43#endif 44#endif
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index da0524eaee94..ec8f5a1fca09 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -17,6 +17,7 @@
17#include <linux/clk-provider.h> 17#include <linux/clk-provider.h>
18#include <linux/clkdev.h> 18#include <linux/clkdev.h>
19#include <linux/of_address.h> 19#include <linux/of_address.h>
20#include <linux/platform_device.h>
20 21
21#include "clk-factors.h" 22#include "clk-factors.h"
22 23
@@ -67,7 +68,7 @@ static struct clk_factors_config sun4i_a10_mod0_config = {
67 .pwidth = 2, 68 .pwidth = 2,
68}; 69};
69 70
70static const struct factors_data sun4i_a10_mod0_data __initconst = { 71static const struct factors_data sun4i_a10_mod0_data = {
71 .enable = 31, 72 .enable = 31,
72 .mux = 24, 73 .mux = 24,
73 .muxmask = BIT(1) | BIT(0), 74 .muxmask = BIT(1) | BIT(0),
@@ -79,15 +80,95 @@ static DEFINE_SPINLOCK(sun4i_a10_mod0_lock);
79 80
80static void __init sun4i_a10_mod0_setup(struct device_node *node) 81static void __init sun4i_a10_mod0_setup(struct device_node *node)
81{ 82{
82 sunxi_factors_register(node, &sun4i_a10_mod0_data, &sun4i_a10_mod0_lock); 83 void __iomem *reg;
84
85 reg = of_iomap(node, 0);
86 if (!reg) {
87 /*
88 * This happens with mod0 clk nodes instantiated through
89 * mfd, as those do not have their resources assigned at
90 * CLK_OF_DECLARE time yet, so do not print an error.
91 */
92 return;
93 }
94
95 sunxi_factors_register(node, &sun4i_a10_mod0_data,
96 &sun4i_a10_mod0_lock, reg);
83} 97}
84CLK_OF_DECLARE(sun4i_a10_mod0, "allwinner,sun4i-a10-mod0-clk", sun4i_a10_mod0_setup); 98CLK_OF_DECLARE(sun4i_a10_mod0, "allwinner,sun4i-a10-mod0-clk", sun4i_a10_mod0_setup);
85 99
100static int sun4i_a10_mod0_clk_probe(struct platform_device *pdev)
101{
102 struct device_node *np = pdev->dev.of_node;
103 struct resource *r;
104 void __iomem *reg;
105
106 if (!np)
107 return -ENODEV;
108
109 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
110 reg = devm_ioremap_resource(&pdev->dev, r);
111 if (IS_ERR(reg))
112 return PTR_ERR(reg);
113
114 sunxi_factors_register(np, &sun4i_a10_mod0_data,
115 &sun4i_a10_mod0_lock, reg);
116 return 0;
117}
118
119static const struct of_device_id sun4i_a10_mod0_clk_dt_ids[] = {
120 { .compatible = "allwinner,sun4i-a10-mod0-clk" },
121 { /* sentinel */ }
122};
123
124static struct platform_driver sun4i_a10_mod0_clk_driver = {
125 .driver = {
126 .name = "sun4i-a10-mod0-clk",
127 .of_match_table = sun4i_a10_mod0_clk_dt_ids,
128 },
129 .probe = sun4i_a10_mod0_clk_probe,
130};
131module_platform_driver(sun4i_a10_mod0_clk_driver);
132
133static const struct factors_data sun9i_a80_mod0_data __initconst = {
134 .enable = 31,
135 .mux = 24,
136 .muxmask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
137 .table = &sun4i_a10_mod0_config,
138 .getter = sun4i_a10_get_mod0_factors,
139};
140
141static void __init sun9i_a80_mod0_setup(struct device_node *node)
142{
143 void __iomem *reg;
144
145 reg = of_io_request_and_map(node, 0, of_node_full_name(node));
146 if (IS_ERR(reg)) {
147 pr_err("Could not get registers for mod0-clk: %s\n",
148 node->name);
149 return;
150 }
151
152 sunxi_factors_register(node, &sun9i_a80_mod0_data,
153 &sun4i_a10_mod0_lock, reg);
154}
155CLK_OF_DECLARE(sun9i_a80_mod0, "allwinner,sun9i-a80-mod0-clk", sun9i_a80_mod0_setup);
156
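Note: the silent of_iomap() failure above, together with the new platform driver, exists because mod0 clock nodes can also be instantiated as children of an MFD device (the PRCM), whose register resources are only assigned when the child platform devices are created, which is too late for CLK_OF_DECLARE. A sketch of how an MFD driver might describe such a cell; the address and cell layout are illustrative assumptions, not taken from this patch:

	#include <linux/ioport.h>
	#include <linux/kernel.h>
	#include <linux/mfd/core.h>

	static const struct resource sun4i_mod0_clk_res[] = {
		DEFINE_RES_MEM(0x01c20080, 0x4),	/* illustrative address */
	};

	static const struct mfd_cell example_prcm_cells[] = {
		{
			/* matched by sun4i_a10_mod0_clk_driver above */
			.name		= "sun4i-a10-mod0-clk",
			.of_compatible	= "allwinner,sun4i-a10-mod0-clk",
			.resources	= sun4i_mod0_clk_res,
			.num_resources	= ARRAY_SIZE(sun4i_mod0_clk_res),
		},
	};

Registering such a cell with mfd_add_devices() creates a platform device whose probe path maps the registers with devm_ioremap_resource(), exactly as sun4i_a10_mod0_clk_probe() does.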
86static DEFINE_SPINLOCK(sun5i_a13_mbus_lock); 157static DEFINE_SPINLOCK(sun5i_a13_mbus_lock);
87 158
88static void __init sun5i_a13_mbus_setup(struct device_node *node) 159static void __init sun5i_a13_mbus_setup(struct device_node *node)
89{ 160{
90 struct clk *mbus = sunxi_factors_register(node, &sun4i_a10_mod0_data, &sun5i_a13_mbus_lock); 161 struct clk *mbus;
162 void __iomem *reg;
163
164 reg = of_iomap(node, 0);
165 if (!reg) {
166 pr_err("Could not get registers for a13-mbus-clk\n");
167 return;
168 }
169
170 mbus = sunxi_factors_register(node, &sun4i_a10_mod0_data,
171 &sun5i_a13_mbus_lock, reg);
91 172
92 /* The MBUS clock needs to be always enabled */ 173 /* The MBUS clock needs to be always enabled */
93 __clk_get(mbus); 174 __clk_get(mbus);
@@ -95,14 +176,10 @@ static void __init sun5i_a13_mbus_setup(struct device_node *node)
95} 176}
96CLK_OF_DECLARE(sun5i_a13_mbus, "allwinner,sun5i-a13-mbus-clk", sun5i_a13_mbus_setup); 177CLK_OF_DECLARE(sun5i_a13_mbus, "allwinner,sun5i-a13-mbus-clk", sun5i_a13_mbus_setup);
97 178
98struct mmc_phase_data {
99 u8 offset;
100};
101
102struct mmc_phase { 179struct mmc_phase {
103 struct clk_hw hw; 180 struct clk_hw hw;
181 u8 offset;
104 void __iomem *reg; 182 void __iomem *reg;
105 struct mmc_phase_data *data;
106 spinlock_t *lock; 183 spinlock_t *lock;
107}; 184};
108 185
@@ -118,7 +195,7 @@ static int mmc_get_phase(struct clk_hw *hw)
118 u8 delay; 195 u8 delay;
119 196
120 value = readl(phase->reg); 197 value = readl(phase->reg);
121 delay = (value >> phase->data->offset) & 0x3; 198 delay = (value >> phase->offset) & 0x3;
122 199
123 if (!delay) 200 if (!delay)
124 return 180; 201 return 180;
@@ -206,8 +283,8 @@ static int mmc_set_phase(struct clk_hw *hw, int degrees)
206 283
207 spin_lock_irqsave(phase->lock, flags); 284 spin_lock_irqsave(phase->lock, flags);
208 value = readl(phase->reg); 285 value = readl(phase->reg);
209 value &= ~GENMASK(phase->data->offset + 3, phase->data->offset); 286 value &= ~GENMASK(phase->offset + 3, phase->offset);
210 value |= delay << phase->data->offset; 287 value |= delay << phase->offset;
211 writel(value, phase->reg); 288 writel(value, phase->reg);
212 spin_unlock_irqrestore(phase->lock, flags); 289 spin_unlock_irqrestore(phase->lock, flags);
213 290
@@ -219,66 +296,97 @@ static const struct clk_ops mmc_clk_ops = {
219 .set_phase = mmc_set_phase, 296 .set_phase = mmc_set_phase,
220}; 297};
221 298
222static void __init sun4i_a10_mmc_phase_setup(struct device_node *node, 299/*
223 struct mmc_phase_data *data) 300 * sunxi_mmc_setup - Common setup function for mmc module clocks
301 *
302 * The only difference between module clocks on different platforms is the
303 * width of the mux register bits and the valid values, which are passed in
304 * through struct factors_data. The phase clock parts are identical.
305 */
306static void __init sunxi_mmc_setup(struct device_node *node,
307 const struct factors_data *data,
308 spinlock_t *lock)
224{ 309{
225 const char *parent_names[1] = { of_clk_get_parent_name(node, 0) }; 310 struct clk_onecell_data *clk_data;
226 struct clk_init_data init = { 311 const char *parent;
227 .num_parents = 1, 312 void __iomem *reg;
228 .parent_names = parent_names, 313 int i;
229 .ops = &mmc_clk_ops, 314
230 }; 315 reg = of_io_request_and_map(node, 0, of_node_full_name(node));
231 316 if (IS_ERR(reg)) {
232 struct mmc_phase *phase; 317 pr_err("Couldn't map the %s clock registers\n", node->name);
233 struct clk *clk;
234
235 phase = kmalloc(sizeof(*phase), GFP_KERNEL);
236 if (!phase)
237 return; 318 return;
319 }
238 320
239 phase->hw.init = &init; 321 clk_data = kmalloc(sizeof(*clk_data), GFP_KERNEL);
240 322 if (!clk_data)
241 phase->reg = of_iomap(node, 0); 323 return;
242 if (!phase->reg)
243 goto err_free;
244
245 phase->data = data;
246 phase->lock = &sun4i_a10_mod0_lock;
247
248 if (of_property_read_string(node, "clock-output-names", &init.name))
249 init.name = node->name;
250 324
251 clk = clk_register(NULL, &phase->hw); 325 clk_data->clks = kcalloc(3, sizeof(*clk_data->clks), GFP_KERNEL);
252 if (IS_ERR(clk)) 326 if (!clk_data->clks)
253 goto err_unmap; 327 goto err_free_data;
328
329 clk_data->clk_num = 3;
330 clk_data->clks[0] = sunxi_factors_register(node, data, lock, reg);
331 if (!clk_data->clks[0])
332 goto err_free_clks;
333
334 parent = __clk_get_name(clk_data->clks[0]);
335
336 for (i = 1; i < 3; i++) {
337 struct clk_init_data init = {
338 .num_parents = 1,
339 .parent_names = &parent,
340 .ops = &mmc_clk_ops,
341 };
342 struct mmc_phase *phase;
343
344 phase = kmalloc(sizeof(*phase), GFP_KERNEL);
345 if (!phase)
346 continue;
347
348 phase->hw.init = &init;
349 phase->reg = reg;
350 phase->lock = lock;
351
352 if (i == 1)
353 phase->offset = 8;
354 else
355 phase->offset = 20;
356
357 if (of_property_read_string_index(node, "clock-output-names",
358 i, &init.name))
359 init.name = node->name;
360
361 clk_data->clks[i] = clk_register(NULL, &phase->hw);
362 if (IS_ERR(clk_data->clks[i])) {
363 kfree(phase);
364 continue;
365 }
366 }
254 367
255 of_clk_add_provider(node, of_clk_src_simple_get, clk); 368 of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
256 369
257 return; 370 return;
258 371
259err_unmap: 372err_free_clks:
260 iounmap(phase->reg); 373 kfree(clk_data->clks);
261err_free: 374err_free_data:
262 kfree(phase); 375 kfree(clk_data);
263} 376}
264 377
378static DEFINE_SPINLOCK(sun4i_a10_mmc_lock);
265 379
266static struct mmc_phase_data mmc_output_clk = { 380static void __init sun4i_a10_mmc_setup(struct device_node *node)
267 .offset = 8,
268};
269
270static struct mmc_phase_data mmc_sample_clk = {
271 .offset = 20,
272};
273
274static void __init sun4i_a10_mmc_output_setup(struct device_node *node)
275{ 381{
276 sun4i_a10_mmc_phase_setup(node, &mmc_output_clk); 382 sunxi_mmc_setup(node, &sun4i_a10_mod0_data, &sun4i_a10_mmc_lock);
277} 383}
278CLK_OF_DECLARE(sun4i_a10_mmc_output, "allwinner,sun4i-a10-mmc-output-clk", sun4i_a10_mmc_output_setup); 384CLK_OF_DECLARE(sun4i_a10_mmc, "allwinner,sun4i-a10-mmc-clk", sun4i_a10_mmc_setup);
385
386static DEFINE_SPINLOCK(sun9i_a80_mmc_lock);
279 387
280static void __init sun4i_a10_mmc_sample_setup(struct device_node *node) 388static void __init sun9i_a80_mmc_setup(struct device_node *node)
281{ 389{
282 sun4i_a10_mmc_phase_setup(node, &mmc_sample_clk); 390 sunxi_mmc_setup(node, &sun9i_a80_mod0_data, &sun9i_a80_mmc_lock);
283} 391}
284CLK_OF_DECLARE(sun4i_a10_mmc_sample, "allwinner,sun4i-a10-mmc-sample-clk", sun4i_a10_mmc_sample_setup); 392CLK_OF_DECLARE(sun9i_a80_mmc, "allwinner,sun9i-a80-mmc-clk", sun9i_a80_mmc_setup);
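Note: after this refactor a single mmc clock node exposes three clocks through of_clk_src_onecell_get(): index 0 is the module clock itself, and indices 1 and 2 are the output and sample phase clocks, whose delay fields sit at bit offsets 8 and 20 of the same register. A standalone sketch of the field arithmetic used by mmc_set_phase(), in plain C with no kernel dependencies; the register value is illustrative:

	#include <stdio.h>
	#include <stdint.h>

	/* same shape as the kernel's GENMASK() for 32-bit values */
	#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))

	/* insert a delay value at a phase field offset, as mmc_set_phase() does */
	static uint32_t set_delay(uint32_t reg, unsigned int offset, uint32_t delay)
	{
		reg &= ~GENMASK(offset + 3, offset);
		reg |= delay << offset;
		return reg;
	}

	int main(void)
	{
		uint32_t reg = 0;

		reg = set_delay(reg, 8, 3);	/* output clock field */
		reg = set_delay(reg, 20, 5);	/* sample clock field */
		printf("reg = 0x%08x\n", reg);	/* prints reg = 0x00500300 */
		return 0;
	}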
diff --git a/drivers/clk/sunxi/clk-sun6i-ar100.c b/drivers/clk/sunxi/clk-sun6i-ar100.c
index 3d282fb8f85c..63cf149195ae 100644
--- a/drivers/clk/sunxi/clk-sun6i-ar100.c
+++ b/drivers/clk/sunxi/clk-sun6i-ar100.c
@@ -45,6 +45,8 @@ static unsigned long ar100_recalc_rate(struct clk_hw *hw,
45} 45}
46 46
47static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate, 47static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate,
48 unsigned long min_rate,
49 unsigned long max_rate,
48 unsigned long *best_parent_rate, 50 unsigned long *best_parent_rate,
49 struct clk_hw **best_parent_clk) 51 struct clk_hw **best_parent_clk)
50{ 52{
diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c
index ef49786eefd3..14cd026064bf 100644
--- a/drivers/clk/sunxi/clk-sun8i-mbus.c
+++ b/drivers/clk/sunxi/clk-sun8i-mbus.c
@@ -69,8 +69,17 @@ static DEFINE_SPINLOCK(sun8i_a23_mbus_lock);
69 69
70static void __init sun8i_a23_mbus_setup(struct device_node *node) 70static void __init sun8i_a23_mbus_setup(struct device_node *node)
71{ 71{
72 struct clk *mbus = sunxi_factors_register(node, &sun8i_a23_mbus_data, 72 struct clk *mbus;
73 &sun8i_a23_mbus_lock); 73 void __iomem *reg;
74
75 reg = of_iomap(node, 0);
76 if (!reg) {
77 pr_err("Could not get registers for a23-mbus-clk\n");
78 return;
79 }
80
81 mbus = sunxi_factors_register(node, &sun8i_a23_mbus_data,
82 &sun8i_a23_mbus_lock, reg);
74 83
75 /* The MBUS clock needs to be always enabled */ 84 /* The MBUS clock needs to be always enabled */
76 __clk_get(mbus); 85 __clk_get(mbus);
diff --git a/drivers/clk/sunxi/clk-sun9i-core.c b/drivers/clk/sunxi/clk-sun9i-core.c
index 3cb9036d91bb..d8da77d72861 100644
--- a/drivers/clk/sunxi/clk-sun9i-core.c
+++ b/drivers/clk/sunxi/clk-sun9i-core.c
@@ -24,50 +24,51 @@
24 24
25 25
26/** 26/**
27 * sun9i_a80_get_pll4_factors() - calculates n, p, m factors for PLL1 27 * sun9i_a80_get_pll4_factors() - calculates n, p, m factors for PLL4
28 * PLL4 rate is calculated as follows 28 * PLL4 rate is calculated as follows
29 * rate = (parent_rate * n >> p) / (m + 1); 29 * rate = (parent_rate * n >> p) / (m + 1);
30 * parent_rate is always 24Mhz 30 * parent_rate is always 24MHz
31 * 31 *
32 * p and m are named div1 and div2 in Allwinner's SDK 32 * p and m are named div1 and div2 in Allwinner's SDK
33 */ 33 */
34 34
35static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate, 35static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate,
36 u8 *n, u8 *k, u8 *m, u8 *p) 36 u8 *n_ret, u8 *k, u8 *m_ret, u8 *p_ret)
37{ 37{
38 int div; 38 int n;
39 int m = 1;
40 int p = 1;
39 41
40 /* Normalize value to a 6M multiple */ 42 /* Normalize value to a 6 MHz multiple (24 MHz / 4) */
41 div = DIV_ROUND_UP(*freq, 6000000); 43 n = DIV_ROUND_UP(*freq, 6000000);
42 44
43 /* divs above 256 cannot be odd */ 45 /* If n is too large switch to steps of 12 MHz */
44 if (div > 256) 46 if (n > 255) {
45 div = round_up(div, 2); 47 m = 0;
48 n = (n + 1) / 2;
49 }
50
51 /* If n is still too large switch to steps of 24 MHz */
52 if (n > 255) {
53 p = 0;
54 n = (n + 1) / 2;
55 }
46 56
47 /* divs above 512 must be a multiple of 4 */ 57 /* n must be between 12 and 255 */
48 if (div > 512) 58 if (n > 255)
49 div = round_up(div, 4); 59 n = 255;
60 else if (n < 12)
61 n = 12;
50 62
51 *freq = 6000000 * div; 63 *freq = ((24000000 * n) >> p) / (m + 1);
52 64
53 /* we were called to round the frequency, we can now return */ 65 /* we were called to round the frequency, we can now return */
54 if (n == NULL) 66 if (n_ret == NULL)
55 return; 67 return;
56 68
57 /* p will be 1 for divs under 512 */ 69 *n_ret = n;
58 if (div < 512) 70 *m_ret = m;
59 *p = 1; 71 *p_ret = p;
60 else
61 *p = 0;
62
63 /* m will be 1 if div is odd */
64 if (div & 1)
65 *m = 1;
66 else
67 *m = 0;
68
69 /* calculate a suitable n based on m and p */
70 *n = div / (*p + 1) / (*m + 1);
71} 72}
72 73
73static struct clk_factors_config sun9i_a80_pll4_config = { 74static struct clk_factors_config sun9i_a80_pll4_config = {
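Note: the rewritten sun9i_a80_get_pll4_factors() normalizes the request to 6 MHz steps (24 MHz / 4, the smallest increment with m = 1 and p = 1) and halves n, rounding up, while widening the step whenever n would overflow its 8-bit field. A standalone plain-C sketch of the same arithmetic; the target rates are illustrative:

	#include <stdio.h>

	#define DIV_ROUND_UP(a, b)	(((a) + (b) - 1) / (b))

	static unsigned long pll4_round(unsigned long freq)
	{
		int n, m = 1, p = 1;

		n = DIV_ROUND_UP(freq, 6000000);	/* 6 MHz steps */
		if (n > 255) {				/* 12 MHz steps */
			m = 0;
			n = (n + 1) / 2;
		}
		if (n > 255) {				/* 24 MHz steps */
			p = 0;
			n = (n + 1) / 2;
		}
		if (n > 255)
			n = 255;
		else if (n < 12)
			n = 12;

		return ((24000000UL * n) >> p) / (m + 1);
	}

	int main(void)
	{
		printf("%lu\n", pll4_round(960000000));	/* 960000000: n=160, m=1, p=1 */
		printf("%lu\n", pll4_round(2000000000));	/* 2004000000: n=167, m=0, p=1 */
		return 0;
	}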
@@ -89,7 +90,17 @@ static DEFINE_SPINLOCK(sun9i_a80_pll4_lock);
89 90
90static void __init sun9i_a80_pll4_setup(struct device_node *node) 91static void __init sun9i_a80_pll4_setup(struct device_node *node)
91{ 92{
92 sunxi_factors_register(node, &sun9i_a80_pll4_data, &sun9i_a80_pll4_lock); 93 void __iomem *reg;
94
95 reg = of_io_request_and_map(node, 0, of_node_full_name(node));
96 if (IS_ERR(reg)) {
97 pr_err("Could not get registers for a80-pll4-clk: %s\n",
98 node->name);
99 return;
100 }
101
102 sunxi_factors_register(node, &sun9i_a80_pll4_data,
103 &sun9i_a80_pll4_lock, reg);
93} 104}
94CLK_OF_DECLARE(sun9i_a80_pll4, "allwinner,sun9i-a80-pll4-clk", sun9i_a80_pll4_setup); 105CLK_OF_DECLARE(sun9i_a80_pll4, "allwinner,sun9i-a80-pll4-clk", sun9i_a80_pll4_setup);
95 106
@@ -139,8 +150,18 @@ static DEFINE_SPINLOCK(sun9i_a80_gt_lock);
139 150
140static void __init sun9i_a80_gt_setup(struct device_node *node) 151static void __init sun9i_a80_gt_setup(struct device_node *node)
141{ 152{
142 struct clk *gt = sunxi_factors_register(node, &sun9i_a80_gt_data, 153 void __iomem *reg;
143 &sun9i_a80_gt_lock); 154 struct clk *gt;
155
156 reg = of_io_request_and_map(node, 0, of_node_full_name(node));
157 if (IS_ERR(reg)) {
158 pr_err("Could not get registers for a80-gt-clk: %s\n",
159 node->name);
160 return;
161 }
162
163 gt = sunxi_factors_register(node, &sun9i_a80_gt_data,
164 &sun9i_a80_gt_lock, reg);
144 165
145 /* The GT bus clock needs to be always enabled */ 166 /* The GT bus clock needs to be always enabled */
146 __clk_get(gt); 167 __clk_get(gt);
@@ -194,7 +215,17 @@ static DEFINE_SPINLOCK(sun9i_a80_ahb_lock);
194 215
195static void __init sun9i_a80_ahb_setup(struct device_node *node) 216static void __init sun9i_a80_ahb_setup(struct device_node *node)
196{ 217{
197 sunxi_factors_register(node, &sun9i_a80_ahb_data, &sun9i_a80_ahb_lock); 218 void __iomem *reg;
219
220 reg = of_io_request_and_map(node, 0, of_node_full_name(node));
221 if (IS_ERR(reg)) {
222 pr_err("Could not get registers for a80-ahb-clk: %s\n",
223 node->name);
224 return;
225 }
226
227 sunxi_factors_register(node, &sun9i_a80_ahb_data,
228 &sun9i_a80_ahb_lock, reg);
198} 229}
199CLK_OF_DECLARE(sun9i_a80_ahb, "allwinner,sun9i-a80-ahb-clk", sun9i_a80_ahb_setup); 230CLK_OF_DECLARE(sun9i_a80_ahb, "allwinner,sun9i-a80-ahb-clk", sun9i_a80_ahb_setup);
200 231
@@ -210,7 +241,17 @@ static DEFINE_SPINLOCK(sun9i_a80_apb0_lock);
210 241
211static void __init sun9i_a80_apb0_setup(struct device_node *node) 242static void __init sun9i_a80_apb0_setup(struct device_node *node)
212{ 243{
213 sunxi_factors_register(node, &sun9i_a80_apb0_data, &sun9i_a80_apb0_lock); 244 void __iomem *reg;
245
246 reg = of_io_request_and_map(node, 0, of_node_full_name(node));
247 if (IS_ERR(reg)) {
248 pr_err("Could not get registers for a80-apb0-clk: %s\n",
249 node->name);
250 return;
251 }
252
253 sunxi_factors_register(node, &sun9i_a80_apb0_data,
254 &sun9i_a80_apb0_lock, reg);
214} 255}
215CLK_OF_DECLARE(sun9i_a80_apb0, "allwinner,sun9i-a80-apb0-clk", sun9i_a80_apb0_setup); 256CLK_OF_DECLARE(sun9i_a80_apb0, "allwinner,sun9i-a80-apb0-clk", sun9i_a80_apb0_setup);
216 257
@@ -266,6 +307,16 @@ static DEFINE_SPINLOCK(sun9i_a80_apb1_lock);
266 307
267static void __init sun9i_a80_apb1_setup(struct device_node *node) 308static void __init sun9i_a80_apb1_setup(struct device_node *node)
268{ 309{
269 sunxi_factors_register(node, &sun9i_a80_apb1_data, &sun9i_a80_apb1_lock); 310 void __iomem *reg;
311
312 reg = of_io_request_and_map(node, 0, of_node_full_name(node));
313 if (IS_ERR(reg)) {
314 pr_err("Could not get registers for a80-apb1-clk: %s\n",
315 node->name);
316 return;
317 }
318
319 sunxi_factors_register(node, &sun9i_a80_apb1_data,
320 &sun9i_a80_apb1_lock, reg);
270} 321}
271CLK_OF_DECLARE(sun9i_a80_apb1, "allwinner,sun9i-a80-apb1-clk", sun9i_a80_apb1_setup); 322CLK_OF_DECLARE(sun9i_a80_apb1, "allwinner,sun9i-a80-apb1-clk", sun9i_a80_apb1_setup);
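Note: the setup functions in this file switch from of_iomap() to of_io_request_and_map(), which also claims the memory region. The two helpers have different failure conventions: of_iomap() returns NULL, while of_io_request_and_map() returns an ERR_PTR() and must be tested with IS_ERR(), as done above. A minimal sketch of the two patterns side by side:

	#include <linux/err.h>
	#include <linux/init.h>
	#include <linux/io.h>
	#include <linux/of.h>
	#include <linux/of_address.h>

	static void __init example_clk_setup(struct device_node *node)
	{
		void __iomem *reg;

		/* of_iomap(): NULL on failure, region is not requested */
		reg = of_iomap(node, 0);
		if (!reg)
			return;
		iounmap(reg);

		/* of_io_request_and_map(): ERR_PTR() on failure, region is requested */
		reg = of_io_request_and_map(node, 0, of_node_full_name(node));
		if (IS_ERR(reg))
			return;
	}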
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
new file mode 100644
index 000000000000..710c273648d7
--- /dev/null
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -0,0 +1,219 @@
1/*
2 * Copyright 2015 Chen-Yu Tsai
3 *
4 * Chen-Yu Tsai <wens@csie.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/clk-provider.h>
18#include <linux/clkdev.h>
19#include <linux/module.h>
20#include <linux/of.h>
21#include <linux/of_device.h>
22#include <linux/reset.h>
23#include <linux/platform_device.h>
24#include <linux/reset-controller.h>
25#include <linux/spinlock.h>
26
27#define SUN9I_MMC_WIDTH 4
28
29#define SUN9I_MMC_GATE_BIT 16
30#define SUN9I_MMC_RESET_BIT 18
31
32struct sun9i_mmc_clk_data {
33 spinlock_t lock;
34 void __iomem *membase;
35 struct clk *clk;
36 struct reset_control *reset;
37 struct clk_onecell_data clk_data;
38 struct reset_controller_dev rcdev;
39};
40
41static int sun9i_mmc_reset_assert(struct reset_controller_dev *rcdev,
42 unsigned long id)
43{
44 struct sun9i_mmc_clk_data *data = container_of(rcdev,
45 struct sun9i_mmc_clk_data,
46 rcdev);
47 unsigned long flags;
48 void __iomem *reg = data->membase + SUN9I_MMC_WIDTH * id;
49 u32 val;
50
51 clk_prepare_enable(data->clk);
52 spin_lock_irqsave(&data->lock, flags);
53
54 val = readl(reg);
55 writel(val & ~BIT(SUN9I_MMC_RESET_BIT), reg);
56
57 spin_unlock_irqrestore(&data->lock, flags);
58 clk_disable_unprepare(data->clk);
59
60 return 0;
61}
62
63static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
64 unsigned long id)
65{
66 struct sun9i_mmc_clk_data *data = container_of(rcdev,
67 struct sun9i_mmc_clk_data,
68 rcdev);
69 unsigned long flags;
70 void __iomem *reg = data->membase + SUN9I_MMC_WIDTH * id;
71 u32 val;
72
73 clk_prepare_enable(data->clk);
74 spin_lock_irqsave(&data->lock, flags);
75
76 val = readl(reg);
77 writel(val | BIT(SUN9I_MMC_RESET_BIT), reg);
78
79 spin_unlock_irqrestore(&data->lock, flags);
80 clk_disable_unprepare(data->clk);
81
82 return 0;
83}
84
85static struct reset_control_ops sun9i_mmc_reset_ops = {
86 .assert = sun9i_mmc_reset_assert,
87 .deassert = sun9i_mmc_reset_deassert,
88};
89
90static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
91{
92 struct device_node *np = pdev->dev.of_node;
93 struct sun9i_mmc_clk_data *data;
94 struct clk_onecell_data *clk_data;
95 const char *clk_name = np->name;
96 const char *clk_parent;
97 struct resource *r;
98 int count, i, ret;
99
100 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
101 if (!data)
102 return -ENOMEM;
103
104 spin_lock_init(&data->lock);
105
106 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
107 data->membase = devm_ioremap_resource(&pdev->dev, r);
108 if (IS_ERR(data->membase))
109 return PTR_ERR(data->membase);
110 /* one clock/reset pair per word */
111 count = DIV_ROUND_UP(resource_size(r), SUN9I_MMC_WIDTH);
112
113 clk_data = &data->clk_data;
114 clk_data->clk_num = count;
115 clk_data->clks = devm_kcalloc(&pdev->dev, count, sizeof(struct clk *),
116 GFP_KERNEL);
117 if (!clk_data->clks)
118 return -ENOMEM;
119
120 data->clk = devm_clk_get(&pdev->dev, NULL);
121 if (IS_ERR(data->clk)) {
122 dev_err(&pdev->dev, "Could not get clock\n");
123 return PTR_ERR(data->clk);
124 }
125
126 data->reset = devm_reset_control_get(&pdev->dev, NULL);
127 if (IS_ERR(data->reset)) {
128 dev_err(&pdev->dev, "Could not get reset control\n");
129 return PTR_ERR(data->reset);
130 }
131
132 ret = reset_control_deassert(data->reset);
133 if (ret) {
134 dev_err(&pdev->dev, "Reset deassert err %d\n", ret);
135 return ret;
136 }
137
138 clk_parent = __clk_get_name(data->clk);
139 for (i = 0; i < count; i++) {
140 of_property_read_string_index(np, "clock-output-names",
141 i, &clk_name);
142
143 clk_data->clks[i] = clk_register_gate(&pdev->dev, clk_name,
144 clk_parent, 0,
145 data->membase + SUN9I_MMC_WIDTH * i,
146 SUN9I_MMC_GATE_BIT, 0,
147 &data->lock);
148
149 if (IS_ERR(clk_data->clks[i])) {
150 ret = PTR_ERR(clk_data->clks[i]);
151 goto err_clk_register;
152 }
153 }
154
155 ret = of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
156 if (ret)
157 goto err_clk_provider;
158
159 data->rcdev.owner = THIS_MODULE;
160 data->rcdev.nr_resets = count;
161 data->rcdev.ops = &sun9i_mmc_reset_ops;
162 data->rcdev.of_node = pdev->dev.of_node;
163
164 ret = reset_controller_register(&data->rcdev);
165 if (ret)
166 goto err_rc_reg;
167
168 platform_set_drvdata(pdev, data);
169
170 return 0;
171
172err_rc_reg:
173 of_clk_del_provider(np);
174
175err_clk_provider:
176 for (i = 0; i < count; i++)
177 clk_unregister(clk_data->clks[i]);
178
179err_clk_register:
180 reset_control_assert(data->reset);
181
182 return ret;
183}
184
185static int sun9i_a80_mmc_config_clk_remove(struct platform_device *pdev)
186{
187 struct device_node *np = pdev->dev.of_node;
188 struct sun9i_mmc_clk_data *data = platform_get_drvdata(pdev);
189 struct clk_onecell_data *clk_data = &data->clk_data;
190 int i;
191
192 reset_controller_unregister(&data->rcdev);
193 of_clk_del_provider(np);
194 for (i = 0; i < clk_data->clk_num; i++)
195 clk_unregister(clk_data->clks[i]);
196
197 reset_control_assert(data->reset);
198
199 return 0;
200}
201
202static const struct of_device_id sun9i_a80_mmc_config_clk_dt_ids[] = {
203 { .compatible = "allwinner,sun9i-a80-mmc-config-clk" },
204 { /* sentinel */ }
205};
206
207static struct platform_driver sun9i_a80_mmc_config_clk_driver = {
208 .driver = {
209 .name = "sun9i-a80-mmc-config-clk",
210 .of_match_table = sun9i_a80_mmc_config_clk_dt_ids,
211 },
212 .probe = sun9i_a80_mmc_config_clk_probe,
213 .remove = sun9i_a80_mmc_config_clk_remove,
214};
215module_platform_driver(sun9i_a80_mmc_config_clk_driver);
216
217MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
218MODULE_DESCRIPTION("Allwinner A80 MMC clock/reset Driver");
219MODULE_LICENSE("GPL v2");
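Note: clk-sun9i-mmc.c treats every 32-bit word in its register range as one gate/reset pair, with the gate at bit 16 and the reset line at bit 18 of word i, which lives at membase + SUN9I_MMC_WIDTH * i; the number of pairs falls out of the resource size. A plain-C sketch of that address and mask arithmetic; the base address and resource size are illustrative:

	#include <stdio.h>

	#define SUN9I_MMC_WIDTH		4	/* one clock/reset pair per word */
	#define SUN9I_MMC_GATE_BIT	16
	#define SUN9I_MMC_RESET_BIT	18

	int main(void)
	{
		unsigned int base = 0x01c13000;	/* illustrative register base */
		unsigned int res_size = 16;	/* 16-byte resource -> 4 pairs */
		unsigned int count = (res_size + SUN9I_MMC_WIDTH - 1) / SUN9I_MMC_WIDTH;
		unsigned int id;

		for (id = 0; id < count; id++)
			printf("pair %u: reg 0x%08x gate 0x%08x reset 0x%08x\n",
			       id, base + SUN9I_MMC_WIDTH * id,
			       1u << SUN9I_MMC_GATE_BIT, 1u << SUN9I_MMC_RESET_BIT);
		return 0;
	}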
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 1818f404538d..379324eb5486 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -20,11 +20,221 @@
20#include <linux/of_address.h> 20#include <linux/of_address.h>
21#include <linux/reset-controller.h> 21#include <linux/reset-controller.h>
22#include <linux/spinlock.h> 22#include <linux/spinlock.h>
23#include <linux/log2.h>
23 24
24#include "clk-factors.h" 25#include "clk-factors.h"
25 26
26static DEFINE_SPINLOCK(clk_lock); 27static DEFINE_SPINLOCK(clk_lock);
27 28
29/**
30 * sun6i_ahb1_clk_setup() - Setup function for a31 ahb1 composite clk
31 */
32
33#define SUN6I_AHB1_MAX_PARENTS 4
34#define SUN6I_AHB1_MUX_PARENT_PLL6 3
35#define SUN6I_AHB1_MUX_SHIFT 12
36/* un-shifted mask is what mux_clk expects */
37#define SUN6I_AHB1_MUX_MASK 0x3
38#define SUN6I_AHB1_MUX_GET_PARENT(reg) ((reg >> SUN6I_AHB1_MUX_SHIFT) & \
39 SUN6I_AHB1_MUX_MASK)
40
41#define SUN6I_AHB1_DIV_SHIFT 4
42#define SUN6I_AHB1_DIV_MASK (0x3 << SUN6I_AHB1_DIV_SHIFT)
43#define SUN6I_AHB1_DIV_GET(reg) ((reg & SUN6I_AHB1_DIV_MASK) >> \
44 SUN6I_AHB1_DIV_SHIFT)
45#define SUN6I_AHB1_DIV_SET(reg, div) ((reg & ~SUN6I_AHB1_DIV_MASK) | \
46 (div << SUN6I_AHB1_DIV_SHIFT))
47#define SUN6I_AHB1_PLL6_DIV_SHIFT 6
48#define SUN6I_AHB1_PLL6_DIV_MASK (0x3 << SUN6I_AHB1_PLL6_DIV_SHIFT)
49#define SUN6I_AHB1_PLL6_DIV_GET(reg) ((reg & SUN6I_AHB1_PLL6_DIV_MASK) >> \
50 SUN6I_AHB1_PLL6_DIV_SHIFT)
51#define SUN6I_AHB1_PLL6_DIV_SET(reg, div) ((reg & ~SUN6I_AHB1_PLL6_DIV_MASK) | \
52 (div << SUN6I_AHB1_PLL6_DIV_SHIFT))
53
54struct sun6i_ahb1_clk {
55 struct clk_hw hw;
56 void __iomem *reg;
57};
58
59#define to_sun6i_ahb1_clk(_hw) container_of(_hw, struct sun6i_ahb1_clk, hw)
60
61static unsigned long sun6i_ahb1_clk_recalc_rate(struct clk_hw *hw,
62 unsigned long parent_rate)
63{
64 struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw);
65 unsigned long rate;
66 u32 reg;
67
68 /* Fetch the register value */
69 reg = readl(ahb1->reg);
70
71 /* apply pre-divider first if parent is pll6 */
72 if (SUN6I_AHB1_MUX_GET_PARENT(reg) == SUN6I_AHB1_MUX_PARENT_PLL6)
73 parent_rate /= SUN6I_AHB1_PLL6_DIV_GET(reg) + 1;
74
75 /* clk divider */
76 rate = parent_rate >> SUN6I_AHB1_DIV_GET(reg);
77
78 return rate;
79}
80
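Note: a worked decode of the fields above, with an illustrative register value and PLL6 rate. With reg = 0x00003050 and PLL6 at 600 MHz, SUN6I_AHB1_MUX_GET_PARENT(reg) = (0x3050 >> 12) & 0x3 = 3, i.e. PLL6, so the pre-divider applies first: SUN6I_AHB1_PLL6_DIV_GET(reg) = (0x3050 & 0xc0) >> 6 = 1, giving 600 MHz / (1 + 1) = 300 MHz; then SUN6I_AHB1_DIV_GET(reg) = (0x3050 & 0x30) >> 4 = 1, so the final rate is 300 MHz >> 1 = 150 MHz.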
81static long sun6i_ahb1_clk_round(unsigned long rate, u8 *divp, u8 *pre_divp,
82 u8 parent, unsigned long parent_rate)
83{
84 u8 div, calcp, calcm = 1;
85
86 /*
87 * clock can only divide, so we will never be able to achieve
88 * frequencies higher than the parent frequency
89 */
90 if (parent_rate && rate > parent_rate)
91 rate = parent_rate;
92
93 div = DIV_ROUND_UP(parent_rate, rate);
94
95 /* calculate pre-divider if parent is pll6 */
96 if (parent == SUN6I_AHB1_MUX_PARENT_PLL6) {
97 if (div < 4)
98 calcp = 0;
99 else if (div / 2 < 4)
100 calcp = 1;
101 else if (div / 4 < 4)
102 calcp = 2;
103 else
104 calcp = 3;
105
106 calcm = DIV_ROUND_UP(div, 1 << calcp);
107 } else {
108 calcp = __roundup_pow_of_two(div);
109 calcp = calcp > 3 ? 3 : calcp;
110 }
111
112 /* we were asked to pass back divider values */
113 if (divp) {
114 *divp = calcp;
115 *pre_divp = calcm - 1;
116 }
117
118 return (parent_rate / calcm) >> calcp;
119}
120
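Note: a worked pass through sun6i_ahb1_clk_round(), with illustrative rates. Asking for 200 MHz with PLL6 at 600 MHz as parent gives div = DIV_ROUND_UP(600, 200) = 3; since div < 4, calcp = 0 and calcm = DIV_ROUND_UP(3, 1 << 0) = 3, so the function reports (600 MHz / 3) >> 0 = 200 MHz and, when called from set_rate, writes back *divp = 0 and *pre_divp = 2.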
121static long sun6i_ahb1_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
122 unsigned long min_rate,
123 unsigned long max_rate,
124 unsigned long *best_parent_rate,
125 struct clk_hw **best_parent_clk)
126{
127 struct clk *clk = hw->clk, *parent, *best_parent = NULL;
128 int i, num_parents;
129 unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
130
131 /* find the parent that can help provide the fastest rate <= rate */
132 num_parents = __clk_get_num_parents(clk);
133 for (i = 0; i < num_parents; i++) {
134 parent = clk_get_parent_by_index(clk, i);
135 if (!parent)
136 continue;
137 if (__clk_get_flags(clk) & CLK_SET_RATE_PARENT)
138 parent_rate = __clk_round_rate(parent, rate);
139 else
140 parent_rate = __clk_get_rate(parent);
141
142 child_rate = sun6i_ahb1_clk_round(rate, NULL, NULL, i,
143 parent_rate);
144
145 if (child_rate <= rate && child_rate > best_child_rate) {
146 best_parent = parent;
147 best = parent_rate;
148 best_child_rate = child_rate;
149 }
150 }
151
152 if (best_parent)
153 *best_parent_clk = __clk_get_hw(best_parent);
154 *best_parent_rate = best;
155
156 return best_child_rate;
157}
158
159static int sun6i_ahb1_clk_set_rate(struct clk_hw *hw, unsigned long rate,
160 unsigned long parent_rate)
161{
162 struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw);
163 unsigned long flags;
164 u8 div, pre_div, parent;
165 u32 reg;
166
167 spin_lock_irqsave(&clk_lock, flags);
168
169 reg = readl(ahb1->reg);
170
171 /* need to know which parent is used to apply pre-divider */
172 parent = SUN6I_AHB1_MUX_GET_PARENT(reg);
173 sun6i_ahb1_clk_round(rate, &div, &pre_div, parent, parent_rate);
174
175 reg = SUN6I_AHB1_DIV_SET(reg, div);
176 reg = SUN6I_AHB1_PLL6_DIV_SET(reg, pre_div);
177 writel(reg, ahb1->reg);
178
179 spin_unlock_irqrestore(&clk_lock, flags);
180
181 return 0;
182}
183
184static const struct clk_ops sun6i_ahb1_clk_ops = {
185 .determine_rate = sun6i_ahb1_clk_determine_rate,
186 .recalc_rate = sun6i_ahb1_clk_recalc_rate,
187 .set_rate = sun6i_ahb1_clk_set_rate,
188};
189
190static void __init sun6i_ahb1_clk_setup(struct device_node *node)
191{
192 struct clk *clk;
193 struct sun6i_ahb1_clk *ahb1;
194 struct clk_mux *mux;
195 const char *clk_name = node->name;
196 const char *parents[SUN6I_AHB1_MAX_PARENTS];
197 void __iomem *reg;
198 int i = 0;
199
200 reg = of_io_request_and_map(node, 0, of_node_full_name(node));
201
202 /* we have a mux, we will have >1 parents */
203 while (i < SUN6I_AHB1_MAX_PARENTS &&
204 (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
205 i++;
206
207 of_property_read_string(node, "clock-output-names", &clk_name);
208
209 ahb1 = kzalloc(sizeof(struct sun6i_ahb1_clk), GFP_KERNEL);
210 if (!ahb1)
211 return;
212
213 mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
214 if (!mux) {
215 kfree(ahb1);
216 return;
217 }
218
219 /* set up clock properties */
220 mux->reg = reg;
221 mux->shift = SUN6I_AHB1_MUX_SHIFT;
222 mux->mask = SUN6I_AHB1_MUX_MASK;
223 mux->lock = &clk_lock;
224 ahb1->reg = reg;
225
226 clk = clk_register_composite(NULL, clk_name, parents, i,
227 &mux->hw, &clk_mux_ops,
228 &ahb1->hw, &sun6i_ahb1_clk_ops,
229 NULL, NULL, 0);
230
231 if (!IS_ERR(clk)) {
232 of_clk_add_provider(node, of_clk_src_simple_get, clk);
233 clk_register_clkdev(clk, clk_name, NULL);
234 }
235}
236CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-clk", sun6i_ahb1_clk_setup);
237
28/* Maximum number of parents our clocks have */ 238/* Maximum number of parents our clocks have */
29#define SUNXI_MAX_PARENTS 5 239#define SUNXI_MAX_PARENTS 5
30 240
@@ -355,43 +565,6 @@ static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
355} 565}
356 566
357/** 567/**
358 * clk_sunxi_mmc_phase_control() - configures MMC clock phase control
359 */
360
361void clk_sunxi_mmc_phase_control(struct clk *clk, u8 sample, u8 output)
362{
363 #define to_clk_composite(_hw) container_of(_hw, struct clk_composite, hw)
364 #define to_clk_factors(_hw) container_of(_hw, struct clk_factors, hw)
365
366 struct clk_hw *hw = __clk_get_hw(clk);
367 struct clk_composite *composite = to_clk_composite(hw);
368 struct clk_hw *rate_hw = composite->rate_hw;
369 struct clk_factors *factors = to_clk_factors(rate_hw);
370 unsigned long flags = 0;
371 u32 reg;
372
373 if (factors->lock)
374 spin_lock_irqsave(factors->lock, flags);
375
376 reg = readl(factors->reg);
377
378 /* set sample clock phase control */
379 reg &= ~(0x7 << 20);
380 reg |= ((sample & 0x7) << 20);
381
382 /* set output clock phase control */
383 reg &= ~(0x7 << 8);
384 reg |= ((output & 0x7) << 8);
385
386 writel(reg, factors->reg);
387
388 if (factors->lock)
389 spin_unlock_irqrestore(factors->lock, flags);
390}
391EXPORT_SYMBOL(clk_sunxi_mmc_phase_control);
392
393
394/**
395 * sunxi_factors_clk_setup() - Setup function for factor clocks 568 * sunxi_factors_clk_setup() - Setup function for factor clocks
396 */ 569 */
397 570
@@ -413,6 +586,7 @@ static struct clk_factors_config sun6i_a31_pll1_config = {
413 .kwidth = 2, 586 .kwidth = 2,
414 .mshift = 0, 587 .mshift = 0,
415 .mwidth = 2, 588 .mwidth = 2,
589 .n_start = 1,
416}; 590};
417 591
418static struct clk_factors_config sun8i_a23_pll1_config = { 592static struct clk_factors_config sun8i_a23_pll1_config = {
@@ -520,7 +694,16 @@ static const struct factors_data sun7i_a20_out_data __initconst = {
520static struct clk * __init sunxi_factors_clk_setup(struct device_node *node, 694static struct clk * __init sunxi_factors_clk_setup(struct device_node *node,
521 const struct factors_data *data) 695 const struct factors_data *data)
522{ 696{
523 return sunxi_factors_register(node, data, &clk_lock); 697 void __iomem *reg;
698
699 reg = of_iomap(node, 0);
700 if (!reg) {
701 pr_err("Could not get registers for factors-clk: %s\n",
702 node->name);
703 return NULL;
704 }
705
706 return sunxi_factors_register(node, data, &clk_lock, reg);
524} 707}
525 708
526 709
@@ -561,7 +744,7 @@ static void __init sunxi_mux_clk_setup(struct device_node *node,
561 of_property_read_string(node, "clock-output-names", &clk_name); 744 of_property_read_string(node, "clock-output-names", &clk_name);
562 745
563 clk = clk_register_mux(NULL, clk_name, parents, i, 746 clk = clk_register_mux(NULL, clk_name, parents, i,
564 CLK_SET_RATE_NO_REPARENT, reg, 747 CLK_SET_RATE_PARENT, reg,
565 data->shift, SUNXI_MUX_GATE_WIDTH, 748 data->shift, SUNXI_MUX_GATE_WIDTH,
566 0, &clk_lock); 749 0, &clk_lock);
567 750
@@ -1217,7 +1400,6 @@ CLK_OF_DECLARE(sun7i_a20_clk_init, "allwinner,sun7i-a20", sun5i_init_clocks);
1217 1400
1218static const char *sun6i_critical_clocks[] __initdata = { 1401static const char *sun6i_critical_clocks[] __initdata = {
1219 "cpu", 1402 "cpu",
1220 "ahb1_sdram",
1221}; 1403};
1222 1404
1223static void __init sun6i_init_clocks(struct device_node *node) 1405static void __init sun6i_init_clocks(struct device_node *node)
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index f7dfb72884a4..edb8358fa6ce 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o
15obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o 15obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o
16obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o 16obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o
17obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124.o 17obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124.o
18obj-$(CONFIG_ARCH_TEGRA_132_SOC) += clk-tegra124.o
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index 0011d547a9f7..60738cc954cb 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -64,10 +64,8 @@ enum clk_id {
64 tegra_clk_disp2, 64 tegra_clk_disp2,
65 tegra_clk_dp2, 65 tegra_clk_dp2,
66 tegra_clk_dpaux, 66 tegra_clk_dpaux,
67 tegra_clk_dsia,
68 tegra_clk_dsialp, 67 tegra_clk_dsialp,
69 tegra_clk_dsia_mux, 68 tegra_clk_dsia_mux,
70 tegra_clk_dsib,
71 tegra_clk_dsiblp, 69 tegra_clk_dsiblp,
72 tegra_clk_dsib_mux, 70 tegra_clk_dsib_mux,
73 tegra_clk_dtv, 71 tegra_clk_dtv,
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index 9e899c18af86..d84ae49d0e05 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -28,7 +28,7 @@ static u8 clk_periph_get_parent(struct clk_hw *hw)
28 const struct clk_ops *mux_ops = periph->mux_ops; 28 const struct clk_ops *mux_ops = periph->mux_ops;
29 struct clk_hw *mux_hw = &periph->mux.hw; 29 struct clk_hw *mux_hw = &periph->mux.hw;
30 30
31 mux_hw->clk = hw->clk; 31 __clk_hw_set_clk(mux_hw, hw);
32 32
33 return mux_ops->get_parent(mux_hw); 33 return mux_ops->get_parent(mux_hw);
34} 34}
@@ -39,7 +39,7 @@ static int clk_periph_set_parent(struct clk_hw *hw, u8 index)
39 const struct clk_ops *mux_ops = periph->mux_ops; 39 const struct clk_ops *mux_ops = periph->mux_ops;
40 struct clk_hw *mux_hw = &periph->mux.hw; 40 struct clk_hw *mux_hw = &periph->mux.hw;
41 41
42 mux_hw->clk = hw->clk; 42 __clk_hw_set_clk(mux_hw, hw);
43 43
44 return mux_ops->set_parent(mux_hw, index); 44 return mux_ops->set_parent(mux_hw, index);
45} 45}
@@ -51,7 +51,7 @@ static unsigned long clk_periph_recalc_rate(struct clk_hw *hw,
51 const struct clk_ops *div_ops = periph->div_ops; 51 const struct clk_ops *div_ops = periph->div_ops;
52 struct clk_hw *div_hw = &periph->divider.hw; 52 struct clk_hw *div_hw = &periph->divider.hw;
53 53
54 div_hw->clk = hw->clk; 54 __clk_hw_set_clk(div_hw, hw);
55 55
56 return div_ops->recalc_rate(div_hw, parent_rate); 56 return div_ops->recalc_rate(div_hw, parent_rate);
57} 57}
@@ -63,7 +63,7 @@ static long clk_periph_round_rate(struct clk_hw *hw, unsigned long rate,
63 const struct clk_ops *div_ops = periph->div_ops; 63 const struct clk_ops *div_ops = periph->div_ops;
64 struct clk_hw *div_hw = &periph->divider.hw; 64 struct clk_hw *div_hw = &periph->divider.hw;
65 65
66 div_hw->clk = hw->clk; 66 __clk_hw_set_clk(div_hw, hw);
67 67
68 return div_ops->round_rate(div_hw, rate, prate); 68 return div_ops->round_rate(div_hw, rate, prate);
69} 69}
@@ -75,7 +75,7 @@ static int clk_periph_set_rate(struct clk_hw *hw, unsigned long rate,
75 const struct clk_ops *div_ops = periph->div_ops; 75 const struct clk_ops *div_ops = periph->div_ops;
76 struct clk_hw *div_hw = &periph->divider.hw; 76 struct clk_hw *div_hw = &periph->divider.hw;
77 77
78 div_hw->clk = hw->clk; 78 __clk_hw_set_clk(div_hw, hw);
79 79
80 return div_ops->set_rate(div_hw, rate, parent_rate); 80 return div_ops->set_rate(div_hw, rate, parent_rate);
81} 81}
@@ -86,7 +86,7 @@ static int clk_periph_is_enabled(struct clk_hw *hw)
86 const struct clk_ops *gate_ops = periph->gate_ops; 86 const struct clk_ops *gate_ops = periph->gate_ops;
87 struct clk_hw *gate_hw = &periph->gate.hw; 87 struct clk_hw *gate_hw = &periph->gate.hw;
88 88
89 gate_hw->clk = hw->clk; 89 __clk_hw_set_clk(gate_hw, hw);
90 90
91 return gate_ops->is_enabled(gate_hw); 91 return gate_ops->is_enabled(gate_hw);
92} 92}
@@ -97,7 +97,7 @@ static int clk_periph_enable(struct clk_hw *hw)
97 const struct clk_ops *gate_ops = periph->gate_ops; 97 const struct clk_ops *gate_ops = periph->gate_ops;
98 struct clk_hw *gate_hw = &periph->gate.hw; 98 struct clk_hw *gate_hw = &periph->gate.hw;
99 99
100 gate_hw->clk = hw->clk; 100 __clk_hw_set_clk(gate_hw, hw);
101 101
102 return gate_ops->enable(gate_hw); 102 return gate_ops->enable(gate_hw);
103} 103}
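Note: the mechanical change through this file replaces direct mux_hw->clk = hw->clk assignments with the __clk_hw_set_clk() helper. With the clk core split into per-user struct clk instances backed by a shared struct clk_core, a sub-clock's clk_hw must inherit both pointers from the composite before its ops are forwarded. A sketch of what the helper amounts to, assuming the clk-provider.h definition introduced by the per-user struct clk split:

	static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
	{
		dst->clk = src->clk;
		dst->core = src->core;
	}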
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index c7c6d8fb32fb..bfef9abdf232 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -816,7 +816,9 @@ const struct clk_ops tegra_clk_plle_ops = {
816 .enable = clk_plle_enable, 816 .enable = clk_plle_enable,
817}; 817};
818 818
819#if defined(CONFIG_ARCH_TEGRA_114_SOC) || defined(CONFIG_ARCH_TEGRA_124_SOC) 819#if defined(CONFIG_ARCH_TEGRA_114_SOC) || \
820 defined(CONFIG_ARCH_TEGRA_124_SOC) || \
821 defined(CONFIG_ARCH_TEGRA_132_SOC)
820 822
821static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params, 823static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params,
822 unsigned long parent_rate) 824 unsigned long parent_rate)
@@ -1505,7 +1507,9 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
1505 return clk; 1507 return clk;
1506} 1508}
1507 1509
1508#if defined(CONFIG_ARCH_TEGRA_114_SOC) || defined(CONFIG_ARCH_TEGRA_124_SOC) 1510#if defined(CONFIG_ARCH_TEGRA_114_SOC) || \
1511 defined(CONFIG_ARCH_TEGRA_124_SOC) || \
1512 defined(CONFIG_ARCH_TEGRA_132_SOC)
1509static const struct clk_ops tegra_clk_pllxc_ops = { 1513static const struct clk_ops tegra_clk_pllxc_ops = {
1510 .is_enabled = clk_pll_is_enabled, 1514 .is_enabled = clk_pll_is_enabled,
1511 .enable = clk_pll_iddq_enable, 1515 .enable = clk_pll_iddq_enable,
@@ -1565,7 +1569,7 @@ struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name,
1565 parent = __clk_lookup(parent_name); 1569 parent = __clk_lookup(parent_name);
1566 if (!parent) { 1570 if (!parent) {
1567 WARN(1, "parent clk %s of %s must be registered first\n", 1571 WARN(1, "parent clk %s of %s must be registered first\n",
1568 name, parent_name); 1572 parent_name, name);
1569 return ERR_PTR(-EINVAL); 1573 return ERR_PTR(-EINVAL);
1570 } 1574 }
1571 1575
@@ -1665,7 +1669,7 @@ struct clk *tegra_clk_register_pllm(const char *name, const char *parent_name,
1665 parent = __clk_lookup(parent_name); 1669 parent = __clk_lookup(parent_name);
1666 if (!parent) { 1670 if (!parent) {
1667 WARN(1, "parent clk %s of %s must be registered first\n", 1671 WARN(1, "parent clk %s of %s must be registered first\n",
1668 name, parent_name); 1672 parent_name, name);
1669 return ERR_PTR(-EINVAL); 1673 return ERR_PTR(-EINVAL);
1670 } 1674 }
1671 1675
@@ -1706,7 +1710,7 @@ struct clk *tegra_clk_register_pllc(const char *name, const char *parent_name,
1706 parent = __clk_lookup(parent_name); 1710 parent = __clk_lookup(parent_name);
1707 if (!parent) { 1711 if (!parent) {
1708 WARN(1, "parent clk %s of %s must be registered first\n", 1712 WARN(1, "parent clk %s of %s must be registered first\n",
1709 name, parent_name); 1713 parent_name, name);
1710 return ERR_PTR(-EINVAL); 1714 return ERR_PTR(-EINVAL);
1711 } 1715 }
1712 1716
@@ -1802,7 +1806,7 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
1802} 1806}
1803#endif 1807#endif
1804 1808
1805#ifdef CONFIG_ARCH_TEGRA_124_SOC 1809#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
1806static const struct clk_ops tegra_clk_pllss_ops = { 1810static const struct clk_ops tegra_clk_pllss_ops = {
1807 .is_enabled = clk_pll_is_enabled, 1811 .is_enabled = clk_pll_is_enabled,
1808 .enable = clk_pll_iddq_enable, 1812 .enable = clk_pll_iddq_enable,
@@ -1830,7 +1834,7 @@ struct clk *tegra_clk_register_pllss(const char *name, const char *parent_name,
1830 parent = __clk_lookup(parent_name); 1834 parent = __clk_lookup(parent_name);
1831 if (!parent) { 1835 if (!parent) {
1832 WARN(1, "parent clk %s of %s must be registered first\n", 1836 WARN(1, "parent clk %s of %s must be registered first\n",
1833 name, parent_name); 1837 parent_name, name);
1834 return ERR_PTR(-EINVAL); 1838 return ERR_PTR(-EINVAL);
1835 } 1839 }
1836 1840
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 37f32c49674e..cef0727b9eec 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -434,10 +434,10 @@ static struct tegra_periph_init_data periph_clks[] = {
434 MUX("hda", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA, 125, TEGRA_PERIPH_ON_APB, tegra_clk_hda), 434 MUX("hda", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA, 125, TEGRA_PERIPH_ON_APB, tegra_clk_hda),
435 MUX("hda2codec_2x", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA2CODEC_2X, 111, TEGRA_PERIPH_ON_APB, tegra_clk_hda2codec_2x), 435 MUX("hda2codec_2x", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA2CODEC_2X, 111, TEGRA_PERIPH_ON_APB, tegra_clk_hda2codec_2x),
436 MUX("vfir", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_VFIR, 7, TEGRA_PERIPH_ON_APB, tegra_clk_vfir), 436 MUX("vfir", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_VFIR, 7, TEGRA_PERIPH_ON_APB, tegra_clk_vfir),
437 MUX("sdmmc1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1), 437 MUX("sdmmc1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC1, 14, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc1),
438 MUX("sdmmc2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2), 438 MUX("sdmmc2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC2, 9, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc2),
439 MUX("sdmmc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3), 439 MUX("sdmmc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc3),
440 MUX("sdmmc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4), 440 MUX("sdmmc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc4),
441 MUX("la", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_LA, 76, TEGRA_PERIPH_ON_APB, tegra_clk_la), 441 MUX("la", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_LA, 76, TEGRA_PERIPH_ON_APB, tegra_clk_la),
442 MUX("trace", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_TRACE, 77, TEGRA_PERIPH_ON_APB, tegra_clk_trace), 442 MUX("trace", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_TRACE, 77, TEGRA_PERIPH_ON_APB, tegra_clk_trace),
443 MUX("owr", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_OWR, 71, TEGRA_PERIPH_ON_APB, tegra_clk_owr), 443 MUX("owr", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_OWR, 71, TEGRA_PERIPH_ON_APB, tegra_clk_owr),
@@ -470,10 +470,10 @@ static struct tegra_periph_init_data periph_clks[] = {
470 MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1), 470 MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1),
471 MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1), 471 MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1),
472 MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 165, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2), 472 MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 165, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2),
473 MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1_8), 473 MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc1_8),
474 MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2_8), 474 MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc2_8),
475 MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3_8), 475 MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc3_8),
476 MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4_8), 476 MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc4_8),
477 MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8), 477 MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8),
478 MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8), 478 MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8),
479 MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8), 479 MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8),
@@ -537,8 +537,6 @@ static struct tegra_periph_init_data gate_clks[] = {
537 GATE("xusb_host", "xusb_host_src", 89, 0, tegra_clk_xusb_host, 0), 537 GATE("xusb_host", "xusb_host_src", 89, 0, tegra_clk_xusb_host, 0),
538 GATE("xusb_ss", "xusb_ss_src", 156, 0, tegra_clk_xusb_ss, 0), 538 GATE("xusb_ss", "xusb_ss_src", 156, 0, tegra_clk_xusb_ss, 0),
539 GATE("xusb_dev", "xusb_dev_src", 95, 0, tegra_clk_xusb_dev, 0), 539 GATE("xusb_dev", "xusb_dev_src", 95, 0, tegra_clk_xusb_dev, 0),
540 GATE("dsia", "dsia_mux", 48, 0, tegra_clk_dsia, 0),
541 GATE("dsib", "dsib_mux", 82, 0, tegra_clk_dsib, 0),
542 GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IGNORE_UNUSED), 540 GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IGNORE_UNUSED),
543 GATE("sata_cold", "clk_m", 129, TEGRA_PERIPH_ON_APB, tegra_clk_sata_cold, 0), 541 GATE("sata_cold", "clk_m", 129, TEGRA_PERIPH_ON_APB, tegra_clk_sata_cold, 0),
544 GATE("ispb", "clk_m", 3, 0, tegra_clk_ispb, 0), 542 GATE("ispb", "clk_m", 3, 0, tegra_clk_ispb, 0),
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 0b03d2cf7264..d0766423a5d6 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -715,7 +715,6 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
715 [tegra_clk_sbc2_8] = { .dt_id = TEGRA114_CLK_SBC2, .present = true }, 715 [tegra_clk_sbc2_8] = { .dt_id = TEGRA114_CLK_SBC2, .present = true },
716 [tegra_clk_sbc3_8] = { .dt_id = TEGRA114_CLK_SBC3, .present = true }, 716 [tegra_clk_sbc3_8] = { .dt_id = TEGRA114_CLK_SBC3, .present = true },
717 [tegra_clk_i2c5] = { .dt_id = TEGRA114_CLK_I2C5, .present = true }, 717 [tegra_clk_i2c5] = { .dt_id = TEGRA114_CLK_I2C5, .present = true },
718 [tegra_clk_dsia] = { .dt_id = TEGRA114_CLK_DSIA, .present = true },
719 [tegra_clk_mipi] = { .dt_id = TEGRA114_CLK_MIPI, .present = true }, 718 [tegra_clk_mipi] = { .dt_id = TEGRA114_CLK_MIPI, .present = true },
720 [tegra_clk_hdmi] = { .dt_id = TEGRA114_CLK_HDMI, .present = true }, 719 [tegra_clk_hdmi] = { .dt_id = TEGRA114_CLK_HDMI, .present = true },
721 [tegra_clk_csi] = { .dt_id = TEGRA114_CLK_CSI, .present = true }, 720 [tegra_clk_csi] = { .dt_id = TEGRA114_CLK_CSI, .present = true },
@@ -739,7 +738,6 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
739 [tegra_clk_dtv] = { .dt_id = TEGRA114_CLK_DTV, .present = true }, 738 [tegra_clk_dtv] = { .dt_id = TEGRA114_CLK_DTV, .present = true },
740 [tegra_clk_ndspeed] = { .dt_id = TEGRA114_CLK_NDSPEED, .present = true }, 739 [tegra_clk_ndspeed] = { .dt_id = TEGRA114_CLK_NDSPEED, .present = true },
741 [tegra_clk_i2cslow] = { .dt_id = TEGRA114_CLK_I2CSLOW, .present = true }, 740 [tegra_clk_i2cslow] = { .dt_id = TEGRA114_CLK_I2CSLOW, .present = true },
742 [tegra_clk_dsib] = { .dt_id = TEGRA114_CLK_DSIB, .present = true },
743 [tegra_clk_tsec] = { .dt_id = TEGRA114_CLK_TSEC, .present = true }, 741 [tegra_clk_tsec] = { .dt_id = TEGRA114_CLK_TSEC, .present = true },
744 [tegra_clk_xusb_host] = { .dt_id = TEGRA114_CLK_XUSB_HOST, .present = true }, 742 [tegra_clk_xusb_host] = { .dt_id = TEGRA114_CLK_XUSB_HOST, .present = true },
745 [tegra_clk_msenc] = { .dt_id = TEGRA114_CLK_MSENC, .present = true }, 743 [tegra_clk_msenc] = { .dt_id = TEGRA114_CLK_MSENC, .present = true },
@@ -1224,6 +1222,14 @@ static __init void tegra114_periph_clk_init(void __iomem *clk_base,
1224 clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock); 1222 clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock);
1225 clks[TEGRA114_CLK_DSIB_MUX] = clk; 1223 clks[TEGRA114_CLK_DSIB_MUX] = clk;
1226 1224
1225 clk = tegra_clk_register_periph_gate("dsia", "dsia_mux", 0, clk_base,
1226 0, 48, periph_clk_enb_refcnt);
1227 clks[TEGRA114_CLK_DSIA] = clk;
1228
1229 clk = tegra_clk_register_periph_gate("dsib", "dsib_mux", 0, clk_base,
1230 0, 82, periph_clk_enb_refcnt);
1231 clks[TEGRA114_CLK_DSIB] = clk;
1232
1227 /* emc mux */ 1233 /* emc mux */
1228 clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm, 1234 clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
1229 ARRAY_SIZE(mux_pllmcp_clkm), 1235 ARRAY_SIZE(mux_pllmcp_clkm),
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index f5f9baca7bb6..9a893f2fe8e9 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2012-2014 NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 5 * under the terms and conditions of the GNU General Public License,
@@ -28,6 +28,14 @@
28#include "clk.h" 28#include "clk.h"
29#include "clk-id.h" 29#include "clk-id.h"
30 30
31/*
32 * TEGRA124_CAR_BANK_COUNT: the number of peripheral clock register
33 * banks present in the Tegra124/132 CAR IP block. The banks are
34 * identified by single letters, e.g.: L, H, U, V, W, X. See
35 * periph_regs[] in drivers/clk/tegra/clk.c
36 */
37#define TEGRA124_CAR_BANK_COUNT 6
38
31#define CLK_SOURCE_CSITE 0x1d4 39#define CLK_SOURCE_CSITE 0x1d4
32#define CLK_SOURCE_EMC 0x19c 40#define CLK_SOURCE_EMC 0x19c
33 41
@@ -128,7 +136,6 @@ static unsigned long osc_freq;
128static unsigned long pll_ref_freq; 136static unsigned long pll_ref_freq;
129 137
130static DEFINE_SPINLOCK(pll_d_lock); 138static DEFINE_SPINLOCK(pll_d_lock);
131static DEFINE_SPINLOCK(pll_d2_lock);
132static DEFINE_SPINLOCK(pll_e_lock); 139static DEFINE_SPINLOCK(pll_e_lock);
133static DEFINE_SPINLOCK(pll_re_lock); 140static DEFINE_SPINLOCK(pll_re_lock);
134static DEFINE_SPINLOCK(pll_u_lock); 141static DEFINE_SPINLOCK(pll_u_lock);
@@ -145,11 +152,6 @@ static unsigned long tegra124_input_freq[] = {
145 [12] = 260000000, 152 [12] = 260000000,
146}; 153};
147 154
148static const char *mux_plld_out0_plld2_out0[] = {
149 "pll_d_out0", "pll_d2_out0",
150};
151#define mux_plld_out0_plld2_out0_idx NULL
152
153static const char *mux_pllmcp_clkm[] = { 155static const char *mux_pllmcp_clkm[] = {
154 "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3", 156 "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3",
155}; 157};
@@ -783,7 +785,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
783 [tegra_clk_sbc2] = { .dt_id = TEGRA124_CLK_SBC2, .present = true }, 785 [tegra_clk_sbc2] = { .dt_id = TEGRA124_CLK_SBC2, .present = true },
784 [tegra_clk_sbc3] = { .dt_id = TEGRA124_CLK_SBC3, .present = true }, 786 [tegra_clk_sbc3] = { .dt_id = TEGRA124_CLK_SBC3, .present = true },
785 [tegra_clk_i2c5] = { .dt_id = TEGRA124_CLK_I2C5, .present = true }, 787 [tegra_clk_i2c5] = { .dt_id = TEGRA124_CLK_I2C5, .present = true },
786 [tegra_clk_dsia] = { .dt_id = TEGRA124_CLK_DSIA, .present = true },
787 [tegra_clk_mipi] = { .dt_id = TEGRA124_CLK_MIPI, .present = true }, 788 [tegra_clk_mipi] = { .dt_id = TEGRA124_CLK_MIPI, .present = true },
788 [tegra_clk_hdmi] = { .dt_id = TEGRA124_CLK_HDMI, .present = true }, 789 [tegra_clk_hdmi] = { .dt_id = TEGRA124_CLK_HDMI, .present = true },
789 [tegra_clk_csi] = { .dt_id = TEGRA124_CLK_CSI, .present = true }, 790 [tegra_clk_csi] = { .dt_id = TEGRA124_CLK_CSI, .present = true },
@@ -809,7 +810,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
809 [tegra_clk_soc_therm] = { .dt_id = TEGRA124_CLK_SOC_THERM, .present = true }, 810 [tegra_clk_soc_therm] = { .dt_id = TEGRA124_CLK_SOC_THERM, .present = true },
810 [tegra_clk_dtv] = { .dt_id = TEGRA124_CLK_DTV, .present = true }, 811 [tegra_clk_dtv] = { .dt_id = TEGRA124_CLK_DTV, .present = true },
811 [tegra_clk_i2cslow] = { .dt_id = TEGRA124_CLK_I2CSLOW, .present = true }, 812 [tegra_clk_i2cslow] = { .dt_id = TEGRA124_CLK_I2CSLOW, .present = true },
812 [tegra_clk_dsib] = { .dt_id = TEGRA124_CLK_DSIB, .present = true },
813 [tegra_clk_tsec] = { .dt_id = TEGRA124_CLK_TSEC, .present = true }, 813 [tegra_clk_tsec] = { .dt_id = TEGRA124_CLK_TSEC, .present = true },
814 [tegra_clk_xusb_host] = { .dt_id = TEGRA124_CLK_XUSB_HOST, .present = true }, 814 [tegra_clk_xusb_host] = { .dt_id = TEGRA124_CLK_XUSB_HOST, .present = true },
815 [tegra_clk_msenc] = { .dt_id = TEGRA124_CLK_MSENC, .present = true }, 815 [tegra_clk_msenc] = { .dt_id = TEGRA124_CLK_MSENC, .present = true },
@@ -949,8 +949,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
949 [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_1_MUX, .present = true }, 949 [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_1_MUX, .present = true },
950 [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_2_MUX, .present = true }, 950 [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_2_MUX, .present = true },
951 [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true }, 951 [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true },
952 [tegra_clk_dsia_mux] = { .dt_id = TEGRA124_CLK_DSIA_MUX, .present = true },
953 [tegra_clk_dsib_mux] = { .dt_id = TEGRA124_CLK_DSIB_MUX, .present = true },
954}; 952};
955 953
956static struct tegra_devclk devclks[] __initdata = { 954static struct tegra_devclk devclks[] __initdata = {
@@ -1112,17 +1110,17 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base,
1112 1, 2); 1110 1, 2);
1113 clks[TEGRA124_CLK_XUSB_SS_DIV2] = clk; 1111 clks[TEGRA124_CLK_XUSB_SS_DIV2] = clk;
1114 1112
1115 /* dsia mux */ 1113 clk = clk_register_gate(NULL, "plld_dsi", "plld_out0", 0,
1116 clk = clk_register_mux(NULL, "dsia_mux", mux_plld_out0_plld2_out0, 1114 clk_base + PLLD_MISC, 30, 0, &pll_d_lock);
1117 ARRAY_SIZE(mux_plld_out0_plld2_out0), 0, 1115 clks[TEGRA124_CLK_PLLD_DSI] = clk;
1118 clk_base + PLLD_BASE, 25, 1, 0, &pll_d_lock); 1116
1119 clks[TEGRA124_CLK_DSIA_MUX] = clk; 1117 clk = tegra_clk_register_periph_gate("dsia", "plld_dsi", 0, clk_base,
1118 0, 48, periph_clk_enb_refcnt);
1119 clks[TEGRA124_CLK_DSIA] = clk;
1120 1120
1121 /* dsib mux */ 1121 clk = tegra_clk_register_periph_gate("dsib", "plld_dsi", 0, clk_base,
1122 clk = clk_register_mux(NULL, "dsib_mux", mux_plld_out0_plld2_out0, 1122 0, 82, periph_clk_enb_refcnt);
1123 ARRAY_SIZE(mux_plld_out0_plld2_out0), 0, 1123 clks[TEGRA124_CLK_DSIB] = clk;
1124 clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock);
1125 clks[TEGRA124_CLK_DSIB_MUX] = clk;
1126 1124
1127 /* emc mux */ 1125 /* emc mux */
1128 clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm, 1126 clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
@@ -1351,7 +1349,7 @@ static const struct of_device_id pmc_match[] __initconst = {
1351 {}, 1349 {},
1352}; 1350};
1353 1351
1354static struct tegra_clk_init_table init_table[] __initdata = { 1352static struct tegra_clk_init_table common_init_table[] __initdata = {
1355 {TEGRA124_CLK_UARTA, TEGRA124_CLK_PLL_P, 408000000, 0}, 1353 {TEGRA124_CLK_UARTA, TEGRA124_CLK_PLL_P, 408000000, 0},
1356 {TEGRA124_CLK_UARTB, TEGRA124_CLK_PLL_P, 408000000, 0}, 1354 {TEGRA124_CLK_UARTB, TEGRA124_CLK_PLL_P, 408000000, 0},
1357 {TEGRA124_CLK_UARTC, TEGRA124_CLK_PLL_P, 408000000, 0}, 1355 {TEGRA124_CLK_UARTC, TEGRA124_CLK_PLL_P, 408000000, 0},
@@ -1368,6 +1366,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
1368 {TEGRA124_CLK_I2S4, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0}, 1366 {TEGRA124_CLK_I2S4, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0},
1369 {TEGRA124_CLK_VDE, TEGRA124_CLK_PLL_P, 0, 0}, 1367 {TEGRA124_CLK_VDE, TEGRA124_CLK_PLL_P, 0, 0},
1370 {TEGRA124_CLK_HOST1X, TEGRA124_CLK_PLL_P, 136000000, 1}, 1368 {TEGRA124_CLK_HOST1X, TEGRA124_CLK_PLL_P, 136000000, 1},
1369 {TEGRA124_CLK_DSIALP, TEGRA124_CLK_PLL_P, 68000000, 0},
1370 {TEGRA124_CLK_DSIBLP, TEGRA124_CLK_PLL_P, 68000000, 0},
1371 {TEGRA124_CLK_SCLK, TEGRA124_CLK_PLL_P_OUT2, 102000000, 1}, 1371 {TEGRA124_CLK_SCLK, TEGRA124_CLK_PLL_P_OUT2, 102000000, 1},
1372 {TEGRA124_CLK_DFLL_SOC, TEGRA124_CLK_PLL_P, 51000000, 1}, 1372 {TEGRA124_CLK_DFLL_SOC, TEGRA124_CLK_PLL_P, 51000000, 1},
1373 {TEGRA124_CLK_DFLL_REF, TEGRA124_CLK_PLL_P, 51000000, 1}, 1373 {TEGRA124_CLK_DFLL_REF, TEGRA124_CLK_PLL_P, 51000000, 1},
@@ -1385,27 +1385,73 @@ static struct tegra_clk_init_table init_table[] __initdata = {
 	{TEGRA124_CLK_SATA, TEGRA124_CLK_PLL_P, 104000000, 0},
 	{TEGRA124_CLK_SATA_OOB, TEGRA124_CLK_PLL_P, 204000000, 0},
 	{TEGRA124_CLK_EMC, TEGRA124_CLK_CLK_MAX, 0, 1},
-	{TEGRA124_CLK_CCLK_G, TEGRA124_CLK_CLK_MAX, 0, 1},
 	{TEGRA124_CLK_MSELECT, TEGRA124_CLK_CLK_MAX, 0, 1},
 	{TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1},
 	{TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0},
+	/* This MUST be the last entry. */
+	{TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
+};
+
+static struct tegra_clk_init_table tegra124_init_table[] __initdata = {
 	{TEGRA124_CLK_SOC_THERM, TEGRA124_CLK_PLL_P, 51000000, 0},
+	{TEGRA124_CLK_CCLK_G, TEGRA124_CLK_CLK_MAX, 0, 1},
+	/* This MUST be the last entry. */
+	{TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
+};
+
+/* Tegra132 requires the SOC_THERM clock to remain active */
+static struct tegra_clk_init_table tegra132_init_table[] __initdata = {
+	{TEGRA124_CLK_SOC_THERM, TEGRA124_CLK_PLL_P, 51000000, 1},
 	/* This MUST be the last entry. */
 	{TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
 };
 
+/**
+ * tegra124_clock_apply_init_table - initialize clocks on Tegra124 SoCs
+ *
+ * Program an initial clock rate and enable or disable clocks needed
+ * by the rest of the kernel, for Tegra124 SoCs. It is intended to be
+ * called by assigning a pointer to it to tegra_clk_apply_init_table -
+ * this will be called as an arch_initcall. No return value.
+ */
 static void __init tegra124_clock_apply_init_table(void)
 {
-	tegra_init_from_table(init_table, clks, TEGRA124_CLK_CLK_MAX);
+	tegra_init_from_table(common_init_table, clks, TEGRA124_CLK_CLK_MAX);
+	tegra_init_from_table(tegra124_init_table, clks, TEGRA124_CLK_CLK_MAX);
 }
 
-static void __init tegra124_clock_init(struct device_node *np)
+/**
+ * tegra132_clock_apply_init_table - initialize clocks on Tegra132 SoCs
+ *
+ * Program an initial clock rate and enable or disable clocks needed
+ * by the rest of the kernel, for Tegra132 SoCs. It is intended to be
+ * called by assigning a pointer to it to tegra_clk_apply_init_table -
+ * this will be called as an arch_initcall. No return value.
+ */
+static void __init tegra132_clock_apply_init_table(void)
+{
+	tegra_init_from_table(common_init_table, clks, TEGRA124_CLK_CLK_MAX);
+	tegra_init_from_table(tegra132_init_table, clks, TEGRA124_CLK_CLK_MAX);
+}
+
+/**
+ * tegra124_132_clock_init_pre - clock initialization preamble for T124/T132
+ * @np: struct device_node * of the DT node for the SoC CAR IP block
+ *
+ * Register most of the clocks controlled by the CAR IP block, along
+ * with a few clocks controlled by the PMC IP block. Everything in
+ * this function should be common to Tegra124 and Tegra132. XXX The
+ * PMC clock initialization should probably be moved to PMC-specific
+ * driver code. No return value.
+ */
+static void __init tegra124_132_clock_init_pre(struct device_node *np)
 {
 	struct device_node *node;
+	u32 plld_base;
 
 	clk_base = of_iomap(np, 0);
 	if (!clk_base) {
-		pr_err("ioremap tegra124 CAR failed\n");
+		pr_err("ioremap tegra124/tegra132 CAR failed\n");
 		return;
 	}
 
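The three init tables introduced in the hunk above are sentinel-terminated: tegra_init_from_table() stops at the {TEGRA124_CLK_CLK_MAX, ...} entry, which is why the common table and a per-SoC table can be applied back to back without passing array lengths around. A minimal, self-contained sketch of that walk (the struct mirrors tegra_clk_init_table; the loop body is illustrative, not the driver's actual code):

#include <stdio.h>

#define CLK_MAX 9999	/* stand-in for TEGRA124_CLK_CLK_MAX */

struct init_entry {
	unsigned int clk_id;	/* clock to configure */
	unsigned int parent_id;	/* new parent, or CLK_MAX to keep current */
	unsigned long rate;	/* target rate in Hz, 0 to keep current */
	int enable;		/* enable the clock once configured? */
};

/* walk entries until the mandatory CLK_MAX sentinel */
static void apply_table(const struct init_entry *t)
{
	for (; t->clk_id != CLK_MAX; t++)
		printf("clk %u: parent %u, rate %lu, enable %d\n",
		       t->clk_id, t->parent_id, t->rate, t->enable);
}

int main(void)
{
	const struct init_entry common[] = {
		{ 1, 2, 408000000, 0 },
		{ CLK_MAX, CLK_MAX, 0, 0 },	/* This MUST be the last entry. */
	};
	const struct init_entry soc[] = {
		{ 3, CLK_MAX, 0, 1 },
		{ CLK_MAX, CLK_MAX, 0, 0 },	/* This MUST be the last entry. */
	};

	apply_table(common);	/* shared settings first... */
	apply_table(soc);	/* ...then the per-SoC additions */
	return 0;
}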
@@ -1423,7 +1469,8 @@ static void __init tegra124_clock_init(struct device_node *np)
 		return;
 	}
 
-	clks = tegra_clk_init(clk_base, TEGRA124_CLK_CLK_MAX, 6);
+	clks = tegra_clk_init(clk_base, TEGRA124_CLK_CLK_MAX,
+			      TEGRA124_CAR_BANK_COUNT);
 	if (!clks)
 		return;
 
@@ -1437,13 +1484,76 @@ static void __init tegra124_clock_init(struct device_node *np)
 	tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks, &pll_a_params);
 	tegra_pmc_clk_init(pmc_base, tegra124_clks);
 
+	/* For Tegra124 & Tegra132, PLLD is the only source for DSIA & DSIB */
+	plld_base = clk_readl(clk_base + PLLD_BASE);
+	plld_base &= ~BIT(25);
+	clk_writel(plld_base, clk_base + PLLD_BASE);
+}
+
+/**
+ * tegra124_132_clock_init_post - clock initialization postamble for T124/T132
+ * @np: struct device_node * of the DT node for the SoC CAR IP block
+ *
+ * Register the remaining CAR IP block clocks, along with a few clocks
+ * controlled by the PMC IP block. Everything in this function should
+ * be common to Tegra124 and Tegra132. This function must be called
+ * after tegra124_132_clock_init_pre(), otherwise clk_base and
+ * pmc_base will not be set. No return value.
+ */
+static void __init tegra124_132_clock_init_post(struct device_node *np)
+{
 	tegra_super_clk_gen4_init(clk_base, pmc_base, tegra124_clks,
 				  &pll_x_params);
 	tegra_add_of_provider(np);
 	tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
 
+	tegra_cpu_car_ops = &tegra124_cpu_car_ops;
+}
+
+/**
+ * tegra124_clock_init - Tegra124-specific clock initialization
+ * @np: struct device_node * of the DT node for the SoC CAR IP block
+ *
+ * Register most SoC clocks for the Tegra124 system-on-chip. Most of
+ * this code is shared between the Tegra124 and Tegra132 SoCs,
+ * although some of the initial clock settings and CPU clocks differ.
+ * Intended to be called by the OF init code when a DT node with the
+ * "nvidia,tegra124-car" string is encountered, and declared with
+ * CLK_OF_DECLARE. No return value.
+ */
+static void __init tegra124_clock_init(struct device_node *np)
+{
+	tegra124_132_clock_init_pre(np);
 	tegra_clk_apply_init_table = tegra124_clock_apply_init_table;
+	tegra124_132_clock_init_post(np);
+}
 
-	tegra_cpu_car_ops = &tegra124_cpu_car_ops;
+/**
+ * tegra132_clock_init - Tegra132-specific clock initialization
+ * @np: struct device_node * of the DT node for the SoC CAR IP block
+ *
+ * Register most SoC clocks for the Tegra132 system-on-chip. Most of
+ * this code is shared between the Tegra124 and Tegra132 SoCs,
+ * although some of the initial clock settings and CPU clocks differ.
+ * Intended to be called by the OF init code when a DT node with the
+ * "nvidia,tegra132-car" string is encountered, and declared with
+ * CLK_OF_DECLARE. No return value.
+ */
+static void __init tegra132_clock_init(struct device_node *np)
+{
+	tegra124_132_clock_init_pre(np);
+
+	/*
+	 * On Tegra132, these clocks are controlled by the
+	 * CLUSTER_clocks IP block, located in the CPU complex
+	 */
+	tegra124_clks[tegra_clk_cclk_g].present = false;
+	tegra124_clks[tegra_clk_cclk_lp].present = false;
+	tegra124_clks[tegra_clk_pll_x].present = false;
+	tegra124_clks[tegra_clk_pll_x_out0].present = false;
+
+	tegra_clk_apply_init_table = tegra132_clock_apply_init_table;
+	tegra124_132_clock_init_post(np);
 }
 CLK_OF_DECLARE(tegra124, "nvidia,tegra124-car", tegra124_clock_init);
+CLK_OF_DECLARE(tegra132, "nvidia,tegra132-car", tegra132_clock_init);
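The net effect of the hunks above is a pre/hook/post split: everything shared by the two SoCs moves into tegra124_132_clock_init_pre()/_post(), and each CLK_OF_DECLARE'd entry point contributes only its SoC-specific step in between. A stripped-down sketch of that shape (function names and printouts invented for illustration):

#include <stdio.h>

static void init_pre(void)  { puts("map CAR, register common clocks"); }
static void init_post(void) { puts("register OF provider, CPU car ops"); }

/* each per-SoC entry point differs only in the middle step */
static void tegra124_init(void)
{
	init_pre();
	puts("hook tegra124 init tables");
	init_post();
}

static void tegra132_init(void)
{
	init_pre();
	puts("mark CPU-complex clocks absent, hook tegra132 tables");
	init_post();
}

int main(void)
{
	tegra124_init();
	tegra132_init();
	return 0;
}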
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index 97dc8595c3cd..9ddb7547cb43 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -302,10 +302,13 @@ struct clk ** __init tegra_lookup_dt_id(int clk_id,
 
 tegra_clk_apply_init_table_func tegra_clk_apply_init_table;
 
-void __init tegra_clocks_apply_init_table(void)
+static int __init tegra_clocks_apply_init_table(void)
 {
 	if (!tegra_clk_apply_init_table)
-		return;
+		return 0;
 
 	tegra_clk_apply_init_table();
+
+	return 0;
 }
+arch_initcall(tegra_clocks_apply_init_table);
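This hunk turns the apply-table entry point into an arch_initcall: SoC init code stores a callback in tegra_clk_apply_init_table during early boot, and the initcall invokes it later, returning 0 whether or not a callback was registered, since initcalls must return int. A compressed userspace sketch of the callback-then-deferred-call shape (the kernel's initcall machinery is linker-section based, not a direct call as here):

#include <stdio.h>

typedef void (*apply_func)(void);

static apply_func apply_init_table;	/* set by SoC-specific init code */

/* stands in for arch_initcall(tegra_clocks_apply_init_table) */
static int clocks_apply_init_table(void)
{
	if (!apply_init_table)	/* no SoC hooked a table: nothing to do */
		return 0;

	apply_init_table();
	return 0;
}

static void tegra124_apply(void) { puts("applying tegra124 init tables"); }

int main(void)
{
	apply_init_table = tegra124_apply;	/* during early clock init */
	return clocks_apply_init_table();	/* later, at initcall time */
}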
diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile
index ed4d0aaf8916..105ffd0f5e79 100644
--- a/drivers/clk/ti/Makefile
+++ b/drivers/clk/ti/Makefile
@@ -1,13 +1,17 @@
-ifneq ($(CONFIG_OF),)
 obj-y					+= clk.o autoidle.o clockdomain.o
 clk-common				= dpll.o composite.o divider.o gate.o \
 					  fixed-factor.o mux.o apll.o
 obj-$(CONFIG_SOC_AM33XX)		+= $(clk-common) clk-33xx.o
+obj-$(CONFIG_SOC_TI81XX)		+= $(clk-common) fapll.o clk-816x.o
 obj-$(CONFIG_ARCH_OMAP2)		+= $(clk-common) interface.o clk-2xxx.o
-obj-$(CONFIG_ARCH_OMAP3)		+= $(clk-common) interface.o clk-3xxx.o
+obj-$(CONFIG_ARCH_OMAP3)		+= $(clk-common) interface.o \
+					   clk-3xxx.o
 obj-$(CONFIG_ARCH_OMAP4)		+= $(clk-common) clk-44xx.o
 obj-$(CONFIG_SOC_OMAP5)			+= $(clk-common) clk-54xx.o
 obj-$(CONFIG_SOC_DRA7XX)		+= $(clk-common) clk-7xx.o \
 					   clk-dra7-atl.o
 obj-$(CONFIG_SOC_AM43XX)		+= $(clk-common) clk-43xx.o
+
+ifdef CONFIG_ATAGS
+obj-$(CONFIG_ARCH_OMAP3)		+= clk-3xxx-legacy.o
 endif
diff --git a/drivers/clk/ti/clk-3xxx-legacy.c b/drivers/clk/ti/clk-3xxx-legacy.c
new file mode 100644
index 000000000000..e0732a4c8f26
--- /dev/null
+++ b/drivers/clk/ti/clk-3xxx-legacy.c
@@ -0,0 +1,4653 @@
1/*
2 * OMAP3 Legacy clock data
3 *
4 * Copyright (C) 2014 Texas Instruments, Inc
5 * Tero Kristo (t-kristo@ti.com)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation version 2.
10 *
11 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
12 * kind, whether express or implied; without even the implied warranty
13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/kernel.h>
18#include <linux/clk-provider.h>
19#include <linux/clk/ti.h>
20
21#include "clock.h"
22
23static struct ti_clk_fixed virt_12m_ck_data = {
24 .frequency = 12000000,
25};
26
27static struct ti_clk virt_12m_ck = {
28 .name = "virt_12m_ck",
29 .type = TI_CLK_FIXED,
30 .data = &virt_12m_ck_data,
31};
32
33static struct ti_clk_fixed virt_13m_ck_data = {
34 .frequency = 13000000,
35};
36
37static struct ti_clk virt_13m_ck = {
38 .name = "virt_13m_ck",
39 .type = TI_CLK_FIXED,
40 .data = &virt_13m_ck_data,
41};
42
43static struct ti_clk_fixed virt_19200000_ck_data = {
44 .frequency = 19200000,
45};
46
47static struct ti_clk virt_19200000_ck = {
48 .name = "virt_19200000_ck",
49 .type = TI_CLK_FIXED,
50 .data = &virt_19200000_ck_data,
51};
52
53static struct ti_clk_fixed virt_26000000_ck_data = {
54 .frequency = 26000000,
55};
56
57static struct ti_clk virt_26000000_ck = {
58 .name = "virt_26000000_ck",
59 .type = TI_CLK_FIXED,
60 .data = &virt_26000000_ck_data,
61};
62
63static struct ti_clk_fixed virt_38_4m_ck_data = {
64 .frequency = 38400000,
65};
66
67static struct ti_clk virt_38_4m_ck = {
68 .name = "virt_38_4m_ck",
69 .type = TI_CLK_FIXED,
70 .data = &virt_38_4m_ck_data,
71};
72
73static struct ti_clk_fixed virt_16_8m_ck_data = {
74 .frequency = 16800000,
75};
76
77static struct ti_clk virt_16_8m_ck = {
78 .name = "virt_16_8m_ck",
79 .type = TI_CLK_FIXED,
80 .data = &virt_16_8m_ck_data,
81};
82
83static const char *osc_sys_ck_parents[] = {
84 "virt_12m_ck",
85 "virt_13m_ck",
86 "virt_19200000_ck",
87 "virt_26000000_ck",
88 "virt_38_4m_ck",
89 "virt_16_8m_ck",
90};
91
92static struct ti_clk_mux osc_sys_ck_data = {
93 .num_parents = ARRAY_SIZE(osc_sys_ck_parents),
94 .reg = 0xd40,
95 .module = TI_CLKM_PRM,
96 .parents = osc_sys_ck_parents,
97};
98
99static struct ti_clk osc_sys_ck = {
100 .name = "osc_sys_ck",
101 .type = TI_CLK_MUX,
102 .data = &osc_sys_ck_data,
103};
104
105static struct ti_clk_divider sys_ck_data = {
106 .parent = "osc_sys_ck",
107 .bit_shift = 6,
108 .max_div = 3,
109 .reg = 0x1270,
110 .module = TI_CLKM_PRM,
111 .flags = CLKF_INDEX_STARTS_AT_ONE,
112};
113
114static struct ti_clk sys_ck = {
115 .name = "sys_ck",
116 .type = TI_CLK_DIVIDER,
117 .data = &sys_ck_data,
118};
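/*
 * A minimal sketch, not part of this file: struct ti_clk is a tagged
 * descriptor, with .type selecting the concrete layout behind the
 * opaque .data pointer.  Registration can then be one dispatch loop;
 * the register_*() helpers below are hypothetical stand-ins for the
 * real per-type TI registration code.
 */
#if 0
static void ti_clk_register_one(struct ti_clk *c)
{
	switch (c->type) {
	case TI_CLK_FIXED:	/* e.g. virt_12m_ck above */
		register_fixed(c->name, c->data);
		break;
	case TI_CLK_MUX:	/* e.g. osc_sys_ck above */
		register_mux(c->name, c->data);
		break;
	case TI_CLK_DIVIDER:	/* e.g. sys_ck above */
		register_divider(c->name, c->data);
		break;
	}
}
#endif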
119
120static const char *dpll3_ck_parents[] = {
121 "sys_ck",
122 "sys_ck",
123};
124
125static struct ti_clk_dpll dpll3_ck_data = {
126 .num_parents = ARRAY_SIZE(dpll3_ck_parents),
127 .control_reg = 0xd00,
128 .idlest_reg = 0xd20,
129 .mult_div1_reg = 0xd40,
130 .autoidle_reg = 0xd30,
131 .module = TI_CLKM_CM,
132 .parents = dpll3_ck_parents,
133 .flags = CLKF_CORE,
134 .freqsel_mask = 0xf0,
135 .div1_mask = 0x7f00,
136 .idlest_mask = 0x1,
137 .auto_recal_bit = 0x3,
138 .max_divider = 0x80,
139 .min_divider = 0x1,
140 .recal_en_bit = 0x5,
141 .max_multiplier = 0x7ff,
142 .enable_mask = 0x7,
143 .mult_mask = 0x7ff0000,
144 .recal_st_bit = 0x5,
145 .autoidle_mask = 0x7,
146};
147
148static struct ti_clk dpll3_ck = {
149 .name = "dpll3_ck",
150 .clkdm_name = "dpll3_clkdm",
151 .type = TI_CLK_DPLL,
152 .data = &dpll3_ck_data,
153};
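/*
 * A worked decode, not part of this file: dpll3's multiplier and
 * divider share mult_div1_reg (0xd40) and are carved out by the masks
 * above.  The shifts follow directly from the masks; the N+1
 * convention and the final rate formula are assumptions for
 * illustration (read_reg() is a stand-in), and the authoritative
 * decoding lives in the OMAP DPLL implementation.
 */
#if 0
u32 v = read_reg(0xd40);			/* mult_div1_reg */
u32 mult = (v & 0x7ff0000) >> 16;		/* .mult_mask: 11-bit M */
u32 div1 = (v & 0x7f00) >> 8;			/* .div1_mask: 7-bit N */
unsigned long rate = parent_rate * mult / (div1 + 1);
#endif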
154
155static struct ti_clk_divider dpll3_m2_ck_data = {
156 .parent = "dpll3_ck",
157 .bit_shift = 27,
158 .max_div = 31,
159 .reg = 0xd40,
160 .module = TI_CLKM_CM,
161 .flags = CLKF_INDEX_STARTS_AT_ONE,
162};
163
164static struct ti_clk dpll3_m2_ck = {
165 .name = "dpll3_m2_ck",
166 .type = TI_CLK_DIVIDER,
167 .data = &dpll3_m2_ck_data,
168};
169
170static struct ti_clk_fixed_factor core_ck_data = {
171 .parent = "dpll3_m2_ck",
172 .div = 1,
173 .mult = 1,
174};
175
176static struct ti_clk core_ck = {
177 .name = "core_ck",
178 .type = TI_CLK_FIXED_FACTOR,
179 .data = &core_ck_data,
180};
181
182static struct ti_clk_divider l3_ick_data = {
183 .parent = "core_ck",
184 .max_div = 3,
185 .reg = 0xa40,
186 .module = TI_CLKM_CM,
187 .flags = CLKF_INDEX_STARTS_AT_ONE,
188};
189
190static struct ti_clk l3_ick = {
191 .name = "l3_ick",
192 .type = TI_CLK_DIVIDER,
193 .data = &l3_ick_data,
194};
195
196static struct ti_clk_fixed_factor security_l3_ick_data = {
197 .parent = "l3_ick",
198 .div = 1,
199 .mult = 1,
200};
201
202static struct ti_clk security_l3_ick = {
203 .name = "security_l3_ick",
204 .type = TI_CLK_FIXED_FACTOR,
205 .data = &security_l3_ick_data,
206};
207
208static struct ti_clk_fixed_factor wkup_l4_ick_data = {
209 .parent = "sys_ck",
210 .div = 1,
211 .mult = 1,
212};
213
214static struct ti_clk wkup_l4_ick = {
215 .name = "wkup_l4_ick",
216 .type = TI_CLK_FIXED_FACTOR,
217 .data = &wkup_l4_ick_data,
218};
219
220static struct ti_clk_gate usim_ick_data = {
221 .parent = "wkup_l4_ick",
222 .bit_shift = 9,
223 .reg = 0xc10,
224 .module = TI_CLKM_CM,
225 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
226};
227
228static struct ti_clk usim_ick = {
229 .name = "usim_ick",
230 .clkdm_name = "wkup_clkdm",
231 .type = TI_CLK_GATE,
232 .data = &usim_ick_data,
233};
234
235static struct ti_clk_gate dss2_alwon_fck_data = {
236 .parent = "sys_ck",
237 .bit_shift = 1,
238 .reg = 0xe00,
239 .module = TI_CLKM_CM,
240};
241
242static struct ti_clk dss2_alwon_fck = {
243 .name = "dss2_alwon_fck",
244 .clkdm_name = "dss_clkdm",
245 .type = TI_CLK_GATE,
246 .data = &dss2_alwon_fck_data,
247};
248
249static struct ti_clk_divider l4_ick_data = {
250 .parent = "l3_ick",
251 .bit_shift = 2,
252 .max_div = 3,
253 .reg = 0xa40,
254 .module = TI_CLKM_CM,
255 .flags = CLKF_INDEX_STARTS_AT_ONE,
256};
257
258static struct ti_clk l4_ick = {
259 .name = "l4_ick",
260 .type = TI_CLK_DIVIDER,
261 .data = &l4_ick_data,
262};
263
264static struct ti_clk_fixed_factor core_l4_ick_data = {
265 .parent = "l4_ick",
266 .div = 1,
267 .mult = 1,
268};
269
270static struct ti_clk core_l4_ick = {
271 .name = "core_l4_ick",
272 .type = TI_CLK_FIXED_FACTOR,
273 .data = &core_l4_ick_data,
274};
275
276static struct ti_clk_gate mmchs2_ick_data = {
277 .parent = "core_l4_ick",
278 .bit_shift = 25,
279 .reg = 0xa10,
280 .module = TI_CLKM_CM,
281 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
282};
283
284static struct ti_clk mmchs2_ick = {
285 .name = "mmchs2_ick",
286 .clkdm_name = "core_l4_clkdm",
287 .type = TI_CLK_GATE,
288 .data = &mmchs2_ick_data,
289};
290
291static const char *dpll4_ck_parents[] = {
292 "sys_ck",
293 "sys_ck",
294};
295
296static struct ti_clk_dpll dpll4_ck_data = {
297 .num_parents = ARRAY_SIZE(dpll4_ck_parents),
298 .control_reg = 0xd00,
299 .idlest_reg = 0xd20,
300 .mult_div1_reg = 0xd44,
301 .autoidle_reg = 0xd30,
302 .module = TI_CLKM_CM,
303 .parents = dpll4_ck_parents,
304 .flags = CLKF_PER,
305 .freqsel_mask = 0xf00000,
306 .modes = 0x82,
307 .div1_mask = 0x7f,
308 .idlest_mask = 0x2,
309 .auto_recal_bit = 0x13,
310 .max_divider = 0x80,
311 .min_divider = 0x1,
312 .recal_en_bit = 0x6,
313 .max_multiplier = 0x7ff,
314 .enable_mask = 0x70000,
315 .mult_mask = 0x7ff00,
316 .recal_st_bit = 0x6,
317 .autoidle_mask = 0x38,
318};
319
320static struct ti_clk dpll4_ck = {
321 .name = "dpll4_ck",
322 .clkdm_name = "dpll4_clkdm",
323 .type = TI_CLK_DPLL,
324 .data = &dpll4_ck_data,
325};
326
327static struct ti_clk_divider dpll4_m2_ck_data = {
328 .parent = "dpll4_ck",
329 .max_div = 63,
330 .reg = 0xd48,
331 .module = TI_CLKM_CM,
332 .flags = CLKF_INDEX_STARTS_AT_ONE,
333};
334
335static struct ti_clk dpll4_m2_ck = {
336 .name = "dpll4_m2_ck",
337 .type = TI_CLK_DIVIDER,
338 .data = &dpll4_m2_ck_data,
339};
340
341static struct ti_clk_fixed_factor dpll4_m2x2_mul_ck_data = {
342 .parent = "dpll4_m2_ck",
343 .div = 1,
344 .mult = 2,
345};
346
347static struct ti_clk dpll4_m2x2_mul_ck = {
348 .name = "dpll4_m2x2_mul_ck",
349 .type = TI_CLK_FIXED_FACTOR,
350 .data = &dpll4_m2x2_mul_ck_data,
351};
352
353static struct ti_clk_gate dpll4_m2x2_ck_data = {
354 .parent = "dpll4_m2x2_mul_ck",
355 .bit_shift = 0x1b,
356 .reg = 0xd00,
357 .module = TI_CLKM_CM,
358 .flags = CLKF_SET_BIT_TO_DISABLE,
359};
360
361static struct ti_clk dpll4_m2x2_ck = {
362 .name = "dpll4_m2x2_ck",
363 .type = TI_CLK_GATE,
364 .data = &dpll4_m2x2_ck_data,
365};
366
367static struct ti_clk_fixed_factor omap_96m_alwon_fck_data = {
368 .parent = "dpll4_m2x2_ck",
369 .div = 1,
370 .mult = 1,
371};
372
373static struct ti_clk omap_96m_alwon_fck = {
374 .name = "omap_96m_alwon_fck",
375 .type = TI_CLK_FIXED_FACTOR,
376 .data = &omap_96m_alwon_fck_data,
377};
378
379static struct ti_clk_fixed_factor cm_96m_fck_data = {
380 .parent = "omap_96m_alwon_fck",
381 .div = 1,
382 .mult = 1,
383};
384
385static struct ti_clk cm_96m_fck = {
386 .name = "cm_96m_fck",
387 .type = TI_CLK_FIXED_FACTOR,
388 .data = &cm_96m_fck_data,
389};
390
391static const char *omap_96m_fck_parents[] = {
392 "cm_96m_fck",
393 "sys_ck",
394};
395
396static struct ti_clk_mux omap_96m_fck_data = {
397 .bit_shift = 6,
398 .num_parents = ARRAY_SIZE(omap_96m_fck_parents),
399 .reg = 0xd40,
400 .module = TI_CLKM_CM,
401 .parents = omap_96m_fck_parents,
402};
403
404static struct ti_clk omap_96m_fck = {
405 .name = "omap_96m_fck",
406 .type = TI_CLK_MUX,
407 .data = &omap_96m_fck_data,
408};
409
410static struct ti_clk_fixed_factor core_96m_fck_data = {
411 .parent = "omap_96m_fck",
412 .div = 1,
413 .mult = 1,
414};
415
416static struct ti_clk core_96m_fck = {
417 .name = "core_96m_fck",
418 .type = TI_CLK_FIXED_FACTOR,
419 .data = &core_96m_fck_data,
420};
421
422static struct ti_clk_gate mspro_fck_data = {
423 .parent = "core_96m_fck",
424 .bit_shift = 23,
425 .reg = 0xa00,
426 .module = TI_CLKM_CM,
427 .flags = CLKF_WAIT,
428};
429
430static struct ti_clk mspro_fck = {
431 .name = "mspro_fck",
432 .clkdm_name = "core_l4_clkdm",
433 .type = TI_CLK_GATE,
434 .data = &mspro_fck_data,
435};
436
437static struct ti_clk_gate dss_ick_3430es2_data = {
438 .parent = "l4_ick",
439 .bit_shift = 0,
440 .reg = 0xe10,
441 .module = TI_CLKM_CM,
442 .flags = CLKF_DSS | CLKF_OMAP3 | CLKF_INTERFACE,
443};
444
445static struct ti_clk dss_ick_3430es2 = {
446 .name = "dss_ick",
447 .clkdm_name = "dss_clkdm",
448 .type = TI_CLK_GATE,
449 .data = &dss_ick_3430es2_data,
450};
451
452static struct ti_clk_gate uart4_ick_am35xx_data = {
453 .parent = "core_l4_ick",
454 .bit_shift = 23,
455 .reg = 0xa10,
456 .module = TI_CLKM_CM,
457 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
458};
459
460static struct ti_clk uart4_ick_am35xx = {
461 .name = "uart4_ick_am35xx",
462 .clkdm_name = "core_l4_clkdm",
463 .type = TI_CLK_GATE,
464 .data = &uart4_ick_am35xx_data,
465};
466
467static struct ti_clk_fixed_factor security_l4_ick2_data = {
468 .parent = "l4_ick",
469 .div = 1,
470 .mult = 1,
471};
472
473static struct ti_clk security_l4_ick2 = {
474 .name = "security_l4_ick2",
475 .type = TI_CLK_FIXED_FACTOR,
476 .data = &security_l4_ick2_data,
477};
478
479static struct ti_clk_gate aes1_ick_data = {
480 .parent = "security_l4_ick2",
481 .bit_shift = 3,
482 .reg = 0xa14,
483 .module = TI_CLKM_CM,
484 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
485};
486
487static struct ti_clk aes1_ick = {
488 .name = "aes1_ick",
489 .type = TI_CLK_GATE,
490 .data = &aes1_ick_data,
491};
492
493static const char *dpll5_ck_parents[] = {
494 "sys_ck",
495 "sys_ck",
496};
497
498static struct ti_clk_dpll dpll5_ck_data = {
499 .num_parents = ARRAY_SIZE(dpll5_ck_parents),
500 .control_reg = 0xd04,
501 .idlest_reg = 0xd24,
502 .mult_div1_reg = 0xd4c,
503 .autoidle_reg = 0xd34,
504 .module = TI_CLKM_CM,
505 .parents = dpll5_ck_parents,
506 .freqsel_mask = 0xf0,
507 .modes = 0x82,
508 .div1_mask = 0x7f,
509 .idlest_mask = 0x1,
510 .auto_recal_bit = 0x3,
511 .max_divider = 0x80,
512 .min_divider = 0x1,
513 .recal_en_bit = 0x19,
514 .max_multiplier = 0x7ff,
515 .enable_mask = 0x7,
516 .mult_mask = 0x7ff00,
517 .recal_st_bit = 0x19,
518 .autoidle_mask = 0x7,
519};
520
521static struct ti_clk dpll5_ck = {
522 .name = "dpll5_ck",
523 .clkdm_name = "dpll5_clkdm",
524 .type = TI_CLK_DPLL,
525 .data = &dpll5_ck_data,
526};
527
528static struct ti_clk_divider dpll5_m2_ck_data = {
529 .parent = "dpll5_ck",
530 .max_div = 31,
531 .reg = 0xd50,
532 .module = TI_CLKM_CM,
533 .flags = CLKF_INDEX_STARTS_AT_ONE,
534};
535
536static struct ti_clk dpll5_m2_ck = {
537 .name = "dpll5_m2_ck",
538 .type = TI_CLK_DIVIDER,
539 .data = &dpll5_m2_ck_data,
540};
541
542static struct ti_clk_gate usbhost_120m_fck_data = {
543 .parent = "dpll5_m2_ck",
544 .bit_shift = 1,
545 .reg = 0x1400,
546 .module = TI_CLKM_CM,
547};
548
549static struct ti_clk usbhost_120m_fck = {
550 .name = "usbhost_120m_fck",
551 .clkdm_name = "usbhost_clkdm",
552 .type = TI_CLK_GATE,
553 .data = &usbhost_120m_fck_data,
554};
555
556static struct ti_clk_fixed_factor cm_96m_d2_fck_data = {
557 .parent = "cm_96m_fck",
558 .div = 2,
559 .mult = 1,
560};
561
562static struct ti_clk cm_96m_d2_fck = {
563 .name = "cm_96m_d2_fck",
564 .type = TI_CLK_FIXED_FACTOR,
565 .data = &cm_96m_d2_fck_data,
566};
567
568static struct ti_clk_fixed sys_altclk_data = {
569 .frequency = 0x0,
570};
571
572static struct ti_clk sys_altclk = {
573 .name = "sys_altclk",
574 .type = TI_CLK_FIXED,
575 .data = &sys_altclk_data,
576};
577
578static const char *omap_48m_fck_parents[] = {
579 "cm_96m_d2_fck",
580 "sys_altclk",
581};
582
583static struct ti_clk_mux omap_48m_fck_data = {
584 .bit_shift = 3,
585 .num_parents = ARRAY_SIZE(omap_48m_fck_parents),
586 .reg = 0xd40,
587 .module = TI_CLKM_CM,
588 .parents = omap_48m_fck_parents,
589};
590
591static struct ti_clk omap_48m_fck = {
592 .name = "omap_48m_fck",
593 .type = TI_CLK_MUX,
594 .data = &omap_48m_fck_data,
595};
596
597static struct ti_clk_fixed_factor core_48m_fck_data = {
598 .parent = "omap_48m_fck",
599 .div = 1,
600 .mult = 1,
601};
602
603static struct ti_clk core_48m_fck = {
604 .name = "core_48m_fck",
605 .type = TI_CLK_FIXED_FACTOR,
606 .data = &core_48m_fck_data,
607};
608
609static struct ti_clk_fixed mcbsp_clks_data = {
610 .frequency = 0x0,
611};
612
613static struct ti_clk mcbsp_clks = {
614 .name = "mcbsp_clks",
615 .type = TI_CLK_FIXED,
616 .data = &mcbsp_clks_data,
617};
618
619static struct ti_clk_gate mcbsp2_gate_fck_data = {
620 .parent = "mcbsp_clks",
621 .bit_shift = 0,
622 .reg = 0x1000,
623 .module = TI_CLKM_CM,
624};
625
626static struct ti_clk_fixed_factor per_96m_fck_data = {
627 .parent = "omap_96m_alwon_fck",
628 .div = 1,
629 .mult = 1,
630};
631
632static struct ti_clk per_96m_fck = {
633 .name = "per_96m_fck",
634 .type = TI_CLK_FIXED_FACTOR,
635 .data = &per_96m_fck_data,
636};
637
638static const char *mcbsp2_mux_fck_parents[] = {
639 "per_96m_fck",
640 "mcbsp_clks",
641};
642
643static struct ti_clk_mux mcbsp2_mux_fck_data = {
644 .bit_shift = 6,
645 .num_parents = ARRAY_SIZE(mcbsp2_mux_fck_parents),
646 .reg = 0x274,
647 .module = TI_CLKM_SCRM,
648 .parents = mcbsp2_mux_fck_parents,
649};
650
651static struct ti_clk_composite mcbsp2_fck_data = {
652 .mux = &mcbsp2_mux_fck_data,
653 .gate = &mcbsp2_gate_fck_data,
654};
655
656static struct ti_clk mcbsp2_fck = {
657 .name = "mcbsp2_fck",
658 .type = TI_CLK_COMPOSITE,
659 .data = &mcbsp2_fck_data,
660};
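/*
 * A sketch, not part of this file: a TI_CLK_COMPOSITE such as
 * mcbsp2_fck pairs a mux descriptor with a gate descriptor.  In the
 * common clock framework the same pairing can be expressed with
 * clk_register_composite(); whether the TI code translates it this
 * way is an assumption here, only the register/bit values are taken
 * from the tables above (SCRM 0x274 bit 6 for the mux, CM 0x1000
 * bit 0 for the gate).
 */
#if 0
static struct clk *register_mcbsp2_fck(void __iomem *scrm, void __iomem *cm)
{
	static const char *parents[] = { "per_96m_fck", "mcbsp_clks" };
	static struct clk_mux mux = { .shift = 6, .mask = 1 };
	static struct clk_gate gate = { .bit_idx = 0 };

	mux.reg = scrm + 0x274;
	gate.reg = cm + 0x1000;

	return clk_register_composite(NULL, "mcbsp2_fck",
				      parents, ARRAY_SIZE(parents),
				      &mux.hw, &clk_mux_ops,	/* parent select */
				      NULL, NULL,		/* no rate control */
				      &gate.hw, &clk_gate_ops,	/* enable bit */
				      0);
}
#endif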
661
662static struct ti_clk_fixed_factor dpll3_m2x2_ck_data = {
663 .parent = "dpll3_m2_ck",
664 .div = 1,
665 .mult = 2,
666};
667
668static struct ti_clk dpll3_m2x2_ck = {
669 .name = "dpll3_m2x2_ck",
670 .type = TI_CLK_FIXED_FACTOR,
671 .data = &dpll3_m2x2_ck_data,
672};
673
674static struct ti_clk_fixed_factor corex2_fck_data = {
675 .parent = "dpll3_m2x2_ck",
676 .div = 1,
677 .mult = 1,
678};
679
680static struct ti_clk corex2_fck = {
681 .name = "corex2_fck",
682 .type = TI_CLK_FIXED_FACTOR,
683 .data = &corex2_fck_data,
684};
685
686static struct ti_clk_gate ssi_ssr_gate_fck_3430es1_data = {
687 .parent = "corex2_fck",
688 .bit_shift = 0,
689 .reg = 0xa00,
690 .module = TI_CLKM_CM,
691 .flags = CLKF_NO_WAIT,
692};
693
694static int ssi_ssr_div_fck_3430es1_divs[] = {
695 0,
696 1,
697 2,
698 3,
699 4,
700 0,
701 6,
702 0,
703 8,
704};
705
706static struct ti_clk_divider ssi_ssr_div_fck_3430es1_data = {
707 .num_dividers = ARRAY_SIZE(ssi_ssr_div_fck_3430es1_divs),
708 .parent = "corex2_fck",
709 .bit_shift = 8,
710 .dividers = ssi_ssr_div_fck_3430es1_divs,
711 .reg = 0xa40,
712 .module = TI_CLKM_CM,
713};
714
715static struct ti_clk_composite ssi_ssr_fck_3430es1_data = {
716 .gate = &ssi_ssr_gate_fck_3430es1_data,
717 .divider = &ssi_ssr_div_fck_3430es1_data,
718};
719
720static struct ti_clk ssi_ssr_fck_3430es1 = {
721 .name = "ssi_ssr_fck",
722 .type = TI_CLK_COMPOSITE,
723 .data = &ssi_ssr_fck_3430es1_data,
724};
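/*
 * A sketch, not part of this file: in a dividers[] table like
 * ssi_ssr_div_fck_3430es1_divs above, the array index is the register
 * field value and the entry is the divisor; zero entries mark field
 * values that are invalid on this IP (only /1 /2 /3 /4 /6 /8 exist
 * here).  Choosing a setting is then a scan over the valid entries:
 */
#if 0
static int pick_div_field(const int *divs, int n,
			  unsigned long parent, unsigned long target)
{
	unsigned long best_err = ~0UL;
	int i, best = -1;

	for (i = 0; i < n; i++) {
		unsigned long rate, err;

		if (!divs[i])		/* 0 marks an invalid field value */
			continue;
		rate = parent / divs[i];
		err = rate > target ? rate - target : target - rate;
		if (err < best_err) {
			best_err = err;
			best = i;
		}
	}
	return best;	/* field value to program, -1 if none valid */
}
#endif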
725
726static struct ti_clk_fixed_factor ssi_sst_fck_3430es1_data = {
727 .parent = "ssi_ssr_fck",
728 .div = 2,
729 .mult = 1,
730};
731
732static struct ti_clk ssi_sst_fck_3430es1 = {
733 .name = "ssi_sst_fck",
734 .type = TI_CLK_FIXED_FACTOR,
735 .data = &ssi_sst_fck_3430es1_data,
736};
737
738static struct ti_clk_fixed omap_32k_fck_data = {
739 .frequency = 32768,
740};
741
742static struct ti_clk omap_32k_fck = {
743 .name = "omap_32k_fck",
744 .type = TI_CLK_FIXED,
745 .data = &omap_32k_fck_data,
746};
747
748static struct ti_clk_fixed_factor per_32k_alwon_fck_data = {
749 .parent = "omap_32k_fck",
750 .div = 1,
751 .mult = 1,
752};
753
754static struct ti_clk per_32k_alwon_fck = {
755 .name = "per_32k_alwon_fck",
756 .type = TI_CLK_FIXED_FACTOR,
757 .data = &per_32k_alwon_fck_data,
758};
759
760static struct ti_clk_gate gpio5_dbck_data = {
761 .parent = "per_32k_alwon_fck",
762 .bit_shift = 16,
763 .reg = 0x1000,
764 .module = TI_CLKM_CM,
765};
766
767static struct ti_clk gpio5_dbck = {
768 .name = "gpio5_dbck",
769 .clkdm_name = "per_clkdm",
770 .type = TI_CLK_GATE,
771 .data = &gpio5_dbck_data,
772};
773
774static struct ti_clk_gate gpt1_ick_data = {
775 .parent = "wkup_l4_ick",
776 .bit_shift = 0,
777 .reg = 0xc10,
778 .module = TI_CLKM_CM,
779 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
780};
781
782static struct ti_clk gpt1_ick = {
783 .name = "gpt1_ick",
784 .clkdm_name = "wkup_clkdm",
785 .type = TI_CLK_GATE,
786 .data = &gpt1_ick_data,
787};
788
789static struct ti_clk_gate mcspi3_fck_data = {
790 .parent = "core_48m_fck",
791 .bit_shift = 20,
792 .reg = 0xa00,
793 .module = TI_CLKM_CM,
794 .flags = CLKF_WAIT,
795};
796
797static struct ti_clk mcspi3_fck = {
798 .name = "mcspi3_fck",
799 .clkdm_name = "core_l4_clkdm",
800 .type = TI_CLK_GATE,
801 .data = &mcspi3_fck_data,
802};
803
804static struct ti_clk_gate gpt2_gate_fck_data = {
805 .parent = "sys_ck",
806 .bit_shift = 3,
807 .reg = 0x1000,
808 .module = TI_CLKM_CM,
809};
810
811static const char *gpt2_mux_fck_parents[] = {
812 "omap_32k_fck",
813 "sys_ck",
814};
815
816static struct ti_clk_mux gpt2_mux_fck_data = {
817 .num_parents = ARRAY_SIZE(gpt2_mux_fck_parents),
818 .reg = 0x1040,
819 .module = TI_CLKM_CM,
820 .parents = gpt2_mux_fck_parents,
821};
822
823static struct ti_clk_composite gpt2_fck_data = {
824 .mux = &gpt2_mux_fck_data,
825 .gate = &gpt2_gate_fck_data,
826};
827
828static struct ti_clk gpt2_fck = {
829 .name = "gpt2_fck",
830 .type = TI_CLK_COMPOSITE,
831 .data = &gpt2_fck_data,
832};
833
834static struct ti_clk_gate gpt10_ick_data = {
835 .parent = "core_l4_ick",
836 .bit_shift = 11,
837 .reg = 0xa10,
838 .module = TI_CLKM_CM,
839 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
840};
841
842static struct ti_clk gpt10_ick = {
843 .name = "gpt10_ick",
844 .clkdm_name = "core_l4_clkdm",
845 .type = TI_CLK_GATE,
846 .data = &gpt10_ick_data,
847};
848
849static struct ti_clk_gate uart2_fck_data = {
850 .parent = "core_48m_fck",
851 .bit_shift = 14,
852 .reg = 0xa00,
853 .module = TI_CLKM_CM,
854 .flags = CLKF_WAIT,
855};
856
857static struct ti_clk uart2_fck = {
858 .name = "uart2_fck",
859 .clkdm_name = "core_l4_clkdm",
860 .type = TI_CLK_GATE,
861 .data = &uart2_fck_data,
862};
863
864static struct ti_clk_fixed_factor sr_l4_ick_data = {
865 .parent = "l4_ick",
866 .div = 1,
867 .mult = 1,
868};
869
870static struct ti_clk sr_l4_ick = {
871 .name = "sr_l4_ick",
872 .type = TI_CLK_FIXED_FACTOR,
873 .data = &sr_l4_ick_data,
874};
875
876static struct ti_clk_fixed_factor omap_96m_d8_fck_data = {
877 .parent = "omap_96m_fck",
878 .div = 8,
879 .mult = 1,
880};
881
882static struct ti_clk omap_96m_d8_fck = {
883 .name = "omap_96m_d8_fck",
884 .type = TI_CLK_FIXED_FACTOR,
885 .data = &omap_96m_d8_fck_data,
886};
887
888static struct ti_clk_divider dpll4_m5_ck_data = {
889 .parent = "dpll4_ck",
890 .max_div = 63,
891 .reg = 0xf40,
892 .module = TI_CLKM_CM,
893 .flags = CLKF_INDEX_STARTS_AT_ONE,
894};
895
896static struct ti_clk dpll4_m5_ck = {
897 .name = "dpll4_m5_ck",
898 .type = TI_CLK_DIVIDER,
899 .data = &dpll4_m5_ck_data,
900};
901
902static struct ti_clk_fixed_factor dpll4_m5x2_mul_ck_data = {
903 .parent = "dpll4_m5_ck",
904 .div = 1,
905 .mult = 2,
906 .flags = CLKF_SET_RATE_PARENT,
907};
908
909static struct ti_clk dpll4_m5x2_mul_ck = {
910 .name = "dpll4_m5x2_mul_ck",
911 .type = TI_CLK_FIXED_FACTOR,
912 .data = &dpll4_m5x2_mul_ck_data,
913};
914
915static struct ti_clk_gate dpll4_m5x2_ck_data = {
916 .parent = "dpll4_m5x2_mul_ck",
917 .bit_shift = 0x1e,
918 .reg = 0xd00,
919 .module = TI_CLKM_CM,
920 .flags = CLKF_SET_BIT_TO_DISABLE,
921};
922
923static struct ti_clk dpll4_m5x2_ck = {
924 .name = "dpll4_m5x2_ck",
925 .type = TI_CLK_GATE,
926 .data = &dpll4_m5x2_ck_data,
927};
928
929static struct ti_clk_gate cam_mclk_data = {
930 .parent = "dpll4_m5x2_ck",
931 .bit_shift = 0,
932 .reg = 0xf00,
933 .module = TI_CLKM_CM,
934 .flags = CLKF_SET_RATE_PARENT,
935};
936
937static struct ti_clk cam_mclk = {
938 .name = "cam_mclk",
939 .type = TI_CLK_GATE,
940 .data = &cam_mclk_data,
941};
942
943static struct ti_clk_gate mcbsp3_gate_fck_data = {
944 .parent = "mcbsp_clks",
945 .bit_shift = 1,
946 .reg = 0x1000,
947 .module = TI_CLKM_CM,
948};
949
950static const char *mcbsp3_mux_fck_parents[] = {
951 "per_96m_fck",
952 "mcbsp_clks",
953};
954
955static struct ti_clk_mux mcbsp3_mux_fck_data = {
956 .num_parents = ARRAY_SIZE(mcbsp3_mux_fck_parents),
957 .reg = 0x2d8,
958 .module = TI_CLKM_SCRM,
959 .parents = mcbsp3_mux_fck_parents,
960};
961
962static struct ti_clk_composite mcbsp3_fck_data = {
963 .mux = &mcbsp3_mux_fck_data,
964 .gate = &mcbsp3_gate_fck_data,
965};
966
967static struct ti_clk mcbsp3_fck = {
968 .name = "mcbsp3_fck",
969 .type = TI_CLK_COMPOSITE,
970 .data = &mcbsp3_fck_data,
971};
972
973static struct ti_clk_gate csi2_96m_fck_data = {
974 .parent = "core_96m_fck",
975 .bit_shift = 1,
976 .reg = 0xf00,
977 .module = TI_CLKM_CM,
978};
979
980static struct ti_clk csi2_96m_fck = {
981 .name = "csi2_96m_fck",
982 .clkdm_name = "cam_clkdm",
983 .type = TI_CLK_GATE,
984 .data = &csi2_96m_fck_data,
985};
986
987static struct ti_clk_gate gpt9_gate_fck_data = {
988 .parent = "sys_ck",
989 .bit_shift = 10,
990 .reg = 0x1000,
991 .module = TI_CLKM_CM,
992};
993
994static const char *gpt9_mux_fck_parents[] = {
995 "omap_32k_fck",
996 "sys_ck",
997};
998
999static struct ti_clk_mux gpt9_mux_fck_data = {
1000 .bit_shift = 7,
1001 .num_parents = ARRAY_SIZE(gpt9_mux_fck_parents),
1002 .reg = 0x1040,
1003 .module = TI_CLKM_CM,
1004 .parents = gpt9_mux_fck_parents,
1005};
1006
1007static struct ti_clk_composite gpt9_fck_data = {
1008 .mux = &gpt9_mux_fck_data,
1009 .gate = &gpt9_gate_fck_data,
1010};
1011
1012static struct ti_clk gpt9_fck = {
1013 .name = "gpt9_fck",
1014 .type = TI_CLK_COMPOSITE,
1015 .data = &gpt9_fck_data,
1016};
1017
1018static struct ti_clk_divider dpll3_m3_ck_data = {
1019 .parent = "dpll3_ck",
1020 .bit_shift = 16,
1021 .max_div = 31,
1022 .reg = 0x1140,
1023 .module = TI_CLKM_CM,
1024 .flags = CLKF_INDEX_STARTS_AT_ONE,
1025};
1026
1027static struct ti_clk dpll3_m3_ck = {
1028 .name = "dpll3_m3_ck",
1029 .type = TI_CLK_DIVIDER,
1030 .data = &dpll3_m3_ck_data,
1031};
1032
1033static struct ti_clk_fixed_factor dpll3_m3x2_mul_ck_data = {
1034 .parent = "dpll3_m3_ck",
1035 .div = 1,
1036 .mult = 2,
1037};
1038
1039static struct ti_clk dpll3_m3x2_mul_ck = {
1040 .name = "dpll3_m3x2_mul_ck",
1041 .type = TI_CLK_FIXED_FACTOR,
1042 .data = &dpll3_m3x2_mul_ck_data,
1043};
1044
1045static struct ti_clk_gate sr2_fck_data = {
1046 .parent = "sys_ck",
1047 .bit_shift = 7,
1048 .reg = 0xc00,
1049 .module = TI_CLKM_CM,
1050 .flags = CLKF_WAIT,
1051};
1052
1053static struct ti_clk sr2_fck = {
1054 .name = "sr2_fck",
1055 .clkdm_name = "wkup_clkdm",
1056 .type = TI_CLK_GATE,
1057 .data = &sr2_fck_data,
1058};
1059
1060static struct ti_clk_fixed pclk_ck_data = {
1061 .frequency = 27000000,
1062};
1063
1064static struct ti_clk pclk_ck = {
1065 .name = "pclk_ck",
1066 .type = TI_CLK_FIXED,
1067 .data = &pclk_ck_data,
1068};
1069
1070static struct ti_clk_gate wdt2_ick_data = {
1071 .parent = "wkup_l4_ick",
1072 .bit_shift = 5,
1073 .reg = 0xc10,
1074 .module = TI_CLKM_CM,
1075 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
1076};
1077
1078static struct ti_clk wdt2_ick = {
1079 .name = "wdt2_ick",
1080 .clkdm_name = "wkup_clkdm",
1081 .type = TI_CLK_GATE,
1082 .data = &wdt2_ick_data,
1083};
1084
1085static struct ti_clk_fixed_factor core_l3_ick_data = {
1086 .parent = "l3_ick",
1087 .div = 1,
1088 .mult = 1,
1089};
1090
1091static struct ti_clk core_l3_ick = {
1092 .name = "core_l3_ick",
1093 .type = TI_CLK_FIXED_FACTOR,
1094 .data = &core_l3_ick_data,
1095};
1096
1097static struct ti_clk_gate mcspi4_fck_data = {
1098 .parent = "core_48m_fck",
1099 .bit_shift = 21,
1100 .reg = 0xa00,
1101 .module = TI_CLKM_CM,
1102 .flags = CLKF_WAIT,
1103};
1104
1105static struct ti_clk mcspi4_fck = {
1106 .name = "mcspi4_fck",
1107 .clkdm_name = "core_l4_clkdm",
1108 .type = TI_CLK_GATE,
1109 .data = &mcspi4_fck_data,
1110};
1111
1112static struct ti_clk_fixed_factor per_48m_fck_data = {
1113 .parent = "omap_48m_fck",
1114 .div = 1,
1115 .mult = 1,
1116};
1117
1118static struct ti_clk per_48m_fck = {
1119 .name = "per_48m_fck",
1120 .type = TI_CLK_FIXED_FACTOR,
1121 .data = &per_48m_fck_data,
1122};
1123
1124static struct ti_clk_gate uart4_fck_data = {
1125 .parent = "per_48m_fck",
1126 .bit_shift = 18,
1127 .reg = 0x1000,
1128 .module = TI_CLKM_CM,
1129 .flags = CLKF_WAIT,
1130};
1131
1132static struct ti_clk uart4_fck = {
1133 .name = "uart4_fck",
1134 .clkdm_name = "per_clkdm",
1135 .type = TI_CLK_GATE,
1136 .data = &uart4_fck_data,
1137};
1138
1139static struct ti_clk_fixed_factor omap_96m_d10_fck_data = {
1140 .parent = "omap_96m_fck",
1141 .div = 10,
1142 .mult = 1,
1143};
1144
1145static struct ti_clk omap_96m_d10_fck = {
1146 .name = "omap_96m_d10_fck",
1147 .type = TI_CLK_FIXED_FACTOR,
1148 .data = &omap_96m_d10_fck_data,
1149};
1150
1151static struct ti_clk_gate usim_gate_fck_data = {
1152 .parent = "omap_96m_fck",
1153 .bit_shift = 9,
1154 .reg = 0xc00,
1155 .module = TI_CLKM_CM,
1156};
1157
1158static struct ti_clk_fixed_factor per_l4_ick_data = {
1159 .parent = "l4_ick",
1160 .div = 1,
1161 .mult = 1,
1162};
1163
1164static struct ti_clk per_l4_ick = {
1165 .name = "per_l4_ick",
1166 .type = TI_CLK_FIXED_FACTOR,
1167 .data = &per_l4_ick_data,
1168};
1169
1170static struct ti_clk_gate gpt5_ick_data = {
1171 .parent = "per_l4_ick",
1172 .bit_shift = 6,
1173 .reg = 0x1010,
1174 .module = TI_CLKM_CM,
1175 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
1176};
1177
1178static struct ti_clk gpt5_ick = {
1179 .name = "gpt5_ick",
1180 .clkdm_name = "per_clkdm",
1181 .type = TI_CLK_GATE,
1182 .data = &gpt5_ick_data,
1183};
1184
1185static struct ti_clk_gate mcspi2_ick_data = {
1186 .parent = "core_l4_ick",
1187 .bit_shift = 19,
1188 .reg = 0xa10,
1189 .module = TI_CLKM_CM,
1190 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
1191};
1192
1193static struct ti_clk mcspi2_ick = {
1194 .name = "mcspi2_ick",
1195 .clkdm_name = "core_l4_clkdm",
1196 .type = TI_CLK_GATE,
1197 .data = &mcspi2_ick_data,
1198};
1199
1200static struct ti_clk_fixed_factor ssi_l4_ick_data = {
1201 .parent = "l4_ick",
1202 .div = 1,
1203 .mult = 1,
1204};
1205
1206static struct ti_clk ssi_l4_ick = {
1207 .name = "ssi_l4_ick",
1208 .clkdm_name = "core_l4_clkdm",
1209 .type = TI_CLK_FIXED_FACTOR,
1210 .data = &ssi_l4_ick_data,
1211};
1212
1213static struct ti_clk_gate ssi_ick_3430es1_data = {
1214 .parent = "ssi_l4_ick",
1215 .bit_shift = 0,
1216 .reg = 0xa10,
1217 .module = TI_CLKM_CM,
1218 .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
1219};
1220
1221static struct ti_clk ssi_ick_3430es1 = {
1222 .name = "ssi_ick",
1223 .clkdm_name = "core_l4_clkdm",
1224 .type = TI_CLK_GATE,
1225 .data = &ssi_ick_3430es1_data,
1226};
1227
1228static struct ti_clk_gate i2c2_fck_data = {
1229 .parent = "core_96m_fck",
1230 .bit_shift = 16,
1231 .reg = 0xa00,
1232 .module = TI_CLKM_CM,
1233 .flags = CLKF_WAIT,
1234};
1235
1236static struct ti_clk i2c2_fck = {
1237 .name = "i2c2_fck",
1238 .clkdm_name = "core_l4_clkdm",
1239 .type = TI_CLK_GATE,
1240 .data = &i2c2_fck_data,
1241};
1242
1243static struct ti_clk_divider dpll1_fck_data = {
1244 .parent = "core_ck",
1245 .bit_shift = 19,
1246 .max_div = 7,
1247 .reg = 0x940,
1248 .module = TI_CLKM_CM,
1249 .flags = CLKF_INDEX_STARTS_AT_ONE,
1250};
1251
1252static struct ti_clk dpll1_fck = {
1253 .name = "dpll1_fck",
1254 .type = TI_CLK_DIVIDER,
1255 .data = &dpll1_fck_data,
1256};
1257
1258static const char *dpll1_ck_parents[] = {
1259 "sys_ck",
1260 "dpll1_fck",
1261};
1262
1263static struct ti_clk_dpll dpll1_ck_data = {
1264 .num_parents = ARRAY_SIZE(dpll1_ck_parents),
1265 .control_reg = 0x904,
1266 .idlest_reg = 0x924,
1267 .mult_div1_reg = 0x940,
1268 .autoidle_reg = 0x934,
1269 .module = TI_CLKM_CM,
1270 .parents = dpll1_ck_parents,
1271 .freqsel_mask = 0xf0,
1272 .modes = 0xa0,
1273 .div1_mask = 0x7f,
1274 .idlest_mask = 0x1,
1275 .auto_recal_bit = 0x3,
1276 .max_divider = 0x80,
1277 .min_divider = 0x1,
1278 .recal_en_bit = 0x7,
1279 .max_multiplier = 0x7ff,
1280 .enable_mask = 0x7,
1281 .mult_mask = 0x7ff00,
1282 .recal_st_bit = 0x7,
1283 .autoidle_mask = 0x7,
1284};
1285
1286static struct ti_clk dpll1_ck = {
1287 .name = "dpll1_ck",
1288 .clkdm_name = "dpll1_clkdm",
1289 .type = TI_CLK_DPLL,
1290 .data = &dpll1_ck_data,
1291};
1292
1293static struct ti_clk_fixed secure_32k_fck_data = {
1294 .frequency = 32768,
1295};
1296
1297static struct ti_clk secure_32k_fck = {
1298 .name = "secure_32k_fck",
1299 .type = TI_CLK_FIXED,
1300 .data = &secure_32k_fck_data,
1301};
1302
1303static struct ti_clk_gate gpio5_ick_data = {
1304 .parent = "per_l4_ick",
1305 .bit_shift = 16,
1306 .reg = 0x1010,
1307 .module = TI_CLKM_CM,
1308 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
1309};
1310
1311static struct ti_clk gpio5_ick = {
1312 .name = "gpio5_ick",
1313 .clkdm_name = "per_clkdm",
1314 .type = TI_CLK_GATE,
1315 .data = &gpio5_ick_data,
1316};
1317
1318static struct ti_clk_divider dpll4_m4_ck_data = {
1319 .parent = "dpll4_ck",
1320 .max_div = 32,
1321 .reg = 0xe40,
1322 .module = TI_CLKM_CM,
1323 .flags = CLKF_INDEX_STARTS_AT_ONE,
1324};
1325
1326static struct ti_clk dpll4_m4_ck = {
1327 .name = "dpll4_m4_ck",
1328 .type = TI_CLK_DIVIDER,
1329 .data = &dpll4_m4_ck_data,
1330};
1331
1332static struct ti_clk_fixed_factor dpll4_m4x2_mul_ck_data = {
1333 .parent = "dpll4_m4_ck",
1334 .div = 1,
1335 .mult = 2,
1336 .flags = CLKF_SET_RATE_PARENT,
1337};
1338
1339static struct ti_clk dpll4_m4x2_mul_ck = {
1340 .name = "dpll4_m4x2_mul_ck",
1341 .type = TI_CLK_FIXED_FACTOR,
1342 .data = &dpll4_m4x2_mul_ck_data,
1343};
1344
1345static struct ti_clk_gate dpll4_m4x2_ck_data = {
1346 .parent = "dpll4_m4x2_mul_ck",
1347 .bit_shift = 0x1d,
1348 .reg = 0xd00,
1349 .module = TI_CLKM_CM,
1350 .flags = CLKF_SET_RATE_PARENT | CLKF_SET_BIT_TO_DISABLE,
1351};
1352
1353static struct ti_clk dpll4_m4x2_ck = {
1354 .name = "dpll4_m4x2_ck",
1355 .type = TI_CLK_GATE,
1356 .data = &dpll4_m4x2_ck_data,
1357};
1358
1359static struct ti_clk_gate dss1_alwon_fck_3430es2_data = {
1360 .parent = "dpll4_m4x2_ck",
1361 .bit_shift = 0,
1362 .reg = 0xe00,
1363 .module = TI_CLKM_CM,
1364 .flags = CLKF_DSS | CLKF_SET_RATE_PARENT,
1365};
1366
1367static struct ti_clk dss1_alwon_fck_3430es2 = {
1368 .name = "dss1_alwon_fck",
1369 .clkdm_name = "dss_clkdm",
1370 .type = TI_CLK_GATE,
1371 .data = &dss1_alwon_fck_3430es2_data,
1372};
1373
1374static struct ti_clk_gate uart3_ick_data = {
1375 .parent = "per_l4_ick",
1376 .bit_shift = 11,
1377 .reg = 0x1010,
1378 .module = TI_CLKM_CM,
1379 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
1380};
1381
1382static struct ti_clk uart3_ick = {
1383 .name = "uart3_ick",
1384 .clkdm_name = "per_clkdm",
1385 .type = TI_CLK_GATE,
1386 .data = &uart3_ick_data,
1387};
1388
1389static struct ti_clk_divider dpll4_m3_ck_data = {
1390 .parent = "dpll4_ck",
1391 .bit_shift = 8,
1392 .max_div = 32,
1393 .reg = 0xe40,
1394 .module = TI_CLKM_CM,
1395 .flags = CLKF_INDEX_STARTS_AT_ONE,
1396};
1397
1398static struct ti_clk dpll4_m3_ck = {
1399 .name = "dpll4_m3_ck",
1400 .type = TI_CLK_DIVIDER,
1401 .data = &dpll4_m3_ck_data,
1402};
1403
1404static struct ti_clk_gate mcbsp3_ick_data = {
1405 .parent = "per_l4_ick",
1406 .bit_shift = 1,
1407 .reg = 0x1010,
1408 .module = TI_CLKM_CM,
1409 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
1410};
1411
1412static struct ti_clk mcbsp3_ick = {
1413 .name = "mcbsp3_ick",
1414 .clkdm_name = "per_clkdm",
1415 .type = TI_CLK_GATE,
1416 .data = &mcbsp3_ick_data,
1417};
1418
1419static struct ti_clk_gate gpio3_dbck_data = {
1420 .parent = "per_32k_alwon_fck",
1421 .bit_shift = 14,
1422 .reg = 0x1000,
1423 .module = TI_CLKM_CM,
1424};
1425
1426static struct ti_clk gpio3_dbck = {
1427 .name = "gpio3_dbck",
1428 .clkdm_name = "per_clkdm",
1429 .type = TI_CLK_GATE,
1430 .data = &gpio3_dbck_data,
1431};
1432
1433static struct ti_clk_gate fac_ick_data = {
1434 .parent = "core_l4_ick",
1435 .bit_shift = 8,
1436 .reg = 0xa10,
1437 .module = TI_CLKM_CM,
1438 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
1439};
1440
1441static struct ti_clk fac_ick = {
1442 .name = "fac_ick",
1443 .clkdm_name = "core_l4_clkdm",
1444 .type = TI_CLK_GATE,
1445 .data = &fac_ick_data,
1446};
1447
1448static struct ti_clk_gate clkout2_src_gate_ck_data = {
1449 .parent = "core_ck",
1450 .bit_shift = 7,
1451 .reg = 0xd70,
1452 .module = TI_CLKM_CM,
1453 .flags = CLKF_NO_WAIT,
1454};
1455
1456static struct ti_clk_fixed_factor dpll4_m3x2_mul_ck_data = {
1457 .parent = "dpll4_m3_ck",
1458 .div = 1,
1459 .mult = 2,
1460};
1461
1462static struct ti_clk dpll4_m3x2_mul_ck = {
1463 .name = "dpll4_m3x2_mul_ck",
1464 .type = TI_CLK_FIXED_FACTOR,
1465 .data = &dpll4_m3x2_mul_ck_data,
1466};
1467
1468static struct ti_clk_gate dpll4_m3x2_ck_data = {
1469 .parent = "dpll4_m3x2_mul_ck",
1470 .bit_shift = 0x1c,
1471 .reg = 0xd00,
1472 .module = TI_CLKM_CM,
1473 .flags = CLKF_SET_BIT_TO_DISABLE,
1474};
1475
1476static struct ti_clk dpll4_m3x2_ck = {
1477 .name = "dpll4_m3x2_ck",
1478 .type = TI_CLK_GATE,
1479 .data = &dpll4_m3x2_ck_data,
1480};
1481
1482static const char *omap_54m_fck_parents[] = {
1483 "dpll4_m3x2_ck",
1484 "sys_altclk",
1485};
1486
1487static struct ti_clk_mux omap_54m_fck_data = {
1488 .bit_shift = 5,
1489 .num_parents = ARRAY_SIZE(omap_54m_fck_parents),
1490 .reg = 0xd40,
1491 .module = TI_CLKM_CM,
1492 .parents = omap_54m_fck_parents,
1493};
1494
1495static struct ti_clk omap_54m_fck = {
1496 .name = "omap_54m_fck",
1497 .type = TI_CLK_MUX,
1498 .data = &omap_54m_fck_data,
1499};
1500
1501static const char *clkout2_src_mux_ck_parents[] = {
1502 "core_ck",
1503 "sys_ck",
1504 "cm_96m_fck",
1505 "omap_54m_fck",
1506};
1507
1508static struct ti_clk_mux clkout2_src_mux_ck_data = {
1509 .num_parents = ARRAY_SIZE(clkout2_src_mux_ck_parents),
1510 .reg = 0xd70,
1511 .module = TI_CLKM_CM,
1512 .parents = clkout2_src_mux_ck_parents,
1513};
1514
1515static struct ti_clk_composite clkout2_src_ck_data = {
1516 .mux = &clkout2_src_mux_ck_data,
1517 .gate = &clkout2_src_gate_ck_data,
1518};
1519
1520static struct ti_clk clkout2_src_ck = {
1521 .name = "clkout2_src_ck",
1522 .type = TI_CLK_COMPOSITE,
1523 .data = &clkout2_src_ck_data,
1524};
1525
1526static struct ti_clk_gate i2c1_fck_data = {
1527 .parent = "core_96m_fck",
1528 .bit_shift = 15,
1529 .reg = 0xa00,
1530 .module = TI_CLKM_CM,
1531 .flags = CLKF_WAIT,
1532};
1533
1534static struct ti_clk i2c1_fck = {
1535 .name = "i2c1_fck",
1536 .clkdm_name = "core_l4_clkdm",
1537 .type = TI_CLK_GATE,
1538 .data = &i2c1_fck_data,
1539};
1540
1541static struct ti_clk_gate wdt3_fck_data = {
1542 .parent = "per_32k_alwon_fck",
1543 .bit_shift = 12,
1544 .reg = 0x1000,
1545 .module = TI_CLKM_CM,
1546 .flags = CLKF_WAIT,
1547};
1548
1549static struct ti_clk wdt3_fck = {
1550 .name = "wdt3_fck",
1551 .clkdm_name = "per_clkdm",
1552 .type = TI_CLK_GATE,
1553 .data = &wdt3_fck_data,
1554};
1555
1556static struct ti_clk_gate gpt7_gate_fck_data = {
1557 .parent = "sys_ck",
1558 .bit_shift = 8,
1559 .reg = 0x1000,
1560 .module = TI_CLKM_CM,
1561};
1562
1563static const char *gpt7_mux_fck_parents[] = {
1564 "omap_32k_fck",
1565 "sys_ck",
1566};
1567
1568static struct ti_clk_mux gpt7_mux_fck_data = {
1569 .bit_shift = 5,
1570 .num_parents = ARRAY_SIZE(gpt7_mux_fck_parents),
1571 .reg = 0x1040,
1572 .module = TI_CLKM_CM,
1573 .parents = gpt7_mux_fck_parents,
1574};
1575
1576static struct ti_clk_composite gpt7_fck_data = {
1577 .mux = &gpt7_mux_fck_data,
1578 .gate = &gpt7_gate_fck_data,
1579};
1580
1581static struct ti_clk gpt7_fck = {
1582 .name = "gpt7_fck",
1583 .type = TI_CLK_COMPOSITE,
1584 .data = &gpt7_fck_data,
1585};
1586
1587static struct ti_clk_gate usb_l4_gate_ick_data = {
1588 .parent = "l4_ick",
1589 .bit_shift = 5,
1590 .reg = 0xa10,
1591 .module = TI_CLKM_CM,
1592 .flags = CLKF_INTERFACE,
1593};
1594
1595static struct ti_clk_divider usb_l4_div_ick_data = {
1596 .parent = "l4_ick",
1597 .bit_shift = 4,
1598 .max_div = 1,
1599 .reg = 0xa40,
1600 .module = TI_CLKM_CM,
1601 .flags = CLKF_INDEX_STARTS_AT_ONE,
1602};
1603
1604static struct ti_clk_composite usb_l4_ick_data = {
1605 .gate = &usb_l4_gate_ick_data,
1606 .divider = &usb_l4_div_ick_data,
1607};
1608
1609static struct ti_clk usb_l4_ick = {
1610 .name = "usb_l4_ick",
1611 .type = TI_CLK_COMPOSITE,
1612 .data = &usb_l4_ick_data,
1613};
1614
1615static struct ti_clk_gate uart4_ick_data = {
1616 .parent = "per_l4_ick",
1617 .bit_shift = 18,
1618 .reg = 0x1010,
1619 .module = TI_CLKM_CM,
1620 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
1621};
1622
1623static struct ti_clk uart4_ick = {
1624 .name = "uart4_ick",
1625 .clkdm_name = "per_clkdm",
1626 .type = TI_CLK_GATE,
1627 .data = &uart4_ick_data,
1628};
1629
1630static struct ti_clk_fixed dummy_ck_data = {
1631 .frequency = 0,
1632};
1633
1634static struct ti_clk dummy_ck = {
1635 .name = "dummy_ck",
1636 .type = TI_CLK_FIXED,
1637 .data = &dummy_ck_data,
1638};
1639
1640static const char *gpt3_mux_fck_parents[] = {
1641 "omap_32k_fck",
1642 "sys_ck",
1643};
1644
1645static struct ti_clk_mux gpt3_mux_fck_data = {
1646 .bit_shift = 1,
1647 .num_parents = ARRAY_SIZE(gpt3_mux_fck_parents),
1648 .reg = 0x1040,
1649 .module = TI_CLKM_CM,
1650 .parents = gpt3_mux_fck_parents,
1651};
1652
1653static struct ti_clk_gate gpt9_ick_data = {
1654 .parent = "per_l4_ick",
1655 .bit_shift = 10,
1656 .reg = 0x1010,
1657 .module = TI_CLKM_CM,
1658 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
1659};
1660
1661static struct ti_clk gpt9_ick = {
1662 .name = "gpt9_ick",
1663 .clkdm_name = "per_clkdm",
1664 .type = TI_CLK_GATE,
1665 .data = &gpt9_ick_data,
1666};
1667
1668static struct ti_clk_gate gpt10_gate_fck_data = {
1669 .parent = "sys_ck",
1670 .bit_shift = 11,
1671 .reg = 0xa00,
1672 .module = TI_CLKM_CM,
1673};
1674
1675static struct ti_clk_gate dss_ick_3430es1_data = {
1676 .parent = "l4_ick",
1677 .bit_shift = 0,
1678 .reg = 0xe10,
1679 .module = TI_CLKM_CM,
1680 .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
1681};
1682
1683static struct ti_clk dss_ick_3430es1 = {
1684 .name = "dss_ick",
1685 .clkdm_name = "dss_clkdm",
1686 .type = TI_CLK_GATE,
1687 .data = &dss_ick_3430es1_data,
1688};
1689
1690static struct ti_clk_gate gpt11_ick_data = {
1691 .parent = "core_l4_ick",
1692 .bit_shift = 12,
1693 .reg = 0xa10,
1694 .module = TI_CLKM_CM,
1695 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
1696};
1697
1698static struct ti_clk gpt11_ick = {
1699 .name = "gpt11_ick",
1700 .clkdm_name = "core_l4_clkdm",
1701 .type = TI_CLK_GATE,
1702 .data = &gpt11_ick_data,
1703};
1704
1705static struct ti_clk_divider dpll2_fck_data = {
1706 .parent = "core_ck",
1707 .bit_shift = 19,
1708 .max_div = 7,
1709 .reg = 0x40,
1710 .module = TI_CLKM_CM,
1711 .flags = CLKF_INDEX_STARTS_AT_ONE,
1712};
1713
1714static struct ti_clk dpll2_fck = {
1715 .name = "dpll2_fck",
1716 .type = TI_CLK_DIVIDER,
1717 .data = &dpll2_fck_data,
1718};
1719
1720static struct ti_clk_gate uart1_fck_data = {
1721 .parent = "core_48m_fck",
1722 .bit_shift = 13,
1723 .reg = 0xa00,
1724 .module = TI_CLKM_CM,
1725 .flags = CLKF_WAIT,
1726};
1727
1728static struct ti_clk uart1_fck = {
1729 .name = "uart1_fck",
1730 .clkdm_name = "core_l4_clkdm",
1731 .type = TI_CLK_GATE,
1732 .data = &uart1_fck_data,
1733};
1734
1735static struct ti_clk_gate hsotgusb_ick_3430es1_data = {
1736 .parent = "core_l3_ick",
1737 .bit_shift = 4,
1738 .reg = 0xa10,
1739 .module = TI_CLKM_CM,
1740 .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
1741};
1742
1743static struct ti_clk hsotgusb_ick_3430es1 = {
1744 .name = "hsotgusb_ick_3430es1",
1745 .clkdm_name = "core_l3_clkdm",
1746 .type = TI_CLK_GATE,
1747 .data = &hsotgusb_ick_3430es1_data,
1748};
1749
1750static struct ti_clk_gate gpio2_ick_data = {
1751 .parent = "per_l4_ick",
1752 .bit_shift = 13,
1753 .reg = 0x1010,
1754 .module = TI_CLKM_CM,
1755 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
1756};
1757
1758static struct ti_clk gpio2_ick = {
1759 .name = "gpio2_ick",
1760 .clkdm_name = "per_clkdm",
1761 .type = TI_CLK_GATE,
1762 .data = &gpio2_ick_data,
1763};
1764
1765static struct ti_clk_gate mmchs1_ick_data = {
1766 .parent = "core_l4_ick",
1767 .bit_shift = 24,
1768 .reg = 0xa10,
1769 .module = TI_CLKM_CM,
1770 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
1771};
1772
1773static struct ti_clk mmchs1_ick = {
1774 .name = "mmchs1_ick",
1775 .clkdm_name = "core_l4_clkdm",
1776 .type = TI_CLK_GATE,
1777 .data = &mmchs1_ick_data,
1778};
1779
1780static struct ti_clk_gate modem_fck_data = {
1781 .parent = "sys_ck",
1782 .bit_shift = 31,
1783 .reg = 0xa00,
1784 .module = TI_CLKM_CM,
1785 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
1786};
1787
1788static struct ti_clk modem_fck = {
1789 .name = "modem_fck",
1790 .clkdm_name = "d2d_clkdm",
1791 .type = TI_CLK_GATE,
1792 .data = &modem_fck_data,
1793};
1794
1795static struct ti_clk_gate mcbsp4_ick_data = {
1796 .parent = "per_l4_ick",
1797 .bit_shift = 2,
1798 .reg = 0x1010,
1799 .module = TI_CLKM_CM,
1800 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
1801};
1802
1803static struct ti_clk mcbsp4_ick = {
1804 .name = "mcbsp4_ick",
1805 .clkdm_name = "per_clkdm",
1806 .type = TI_CLK_GATE,
1807 .data = &mcbsp4_ick_data,
1808};
1809
1810static struct ti_clk_gate gpio1_ick_data = {
1811 .parent = "wkup_l4_ick",
1812 .bit_shift = 3,
1813 .reg = 0xc10,
1814 .module = TI_CLKM_CM,
1815 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
1816};
1817
1818static struct ti_clk gpio1_ick = {
1819 .name = "gpio1_ick",
1820 .clkdm_name = "wkup_clkdm",
1821 .type = TI_CLK_GATE,
1822 .data = &gpio1_ick_data,
1823};
1824
1825static const char *gpt6_mux_fck_parents[] = {
1826 "omap_32k_fck",
1827 "sys_ck",
1828};
1829
1830static struct ti_clk_mux gpt6_mux_fck_data = {
1831 .bit_shift = 4,
1832 .num_parents = ARRAY_SIZE(gpt6_mux_fck_parents),
1833 .reg = 0x1040,
1834 .module = TI_CLKM_CM,
1835 .parents = gpt6_mux_fck_parents,
1836};
1837
1838static struct ti_clk_fixed_factor dpll1_x2_ck_data = {
1839 .parent = "dpll1_ck",
1840 .div = 1,
1841 .mult = 2,
1842};
1843
1844static struct ti_clk dpll1_x2_ck = {
1845 .name = "dpll1_x2_ck",
1846 .type = TI_CLK_FIXED_FACTOR,
1847 .data = &dpll1_x2_ck_data,
1848};
1849
1850static struct ti_clk_divider dpll1_x2m2_ck_data = {
1851 .parent = "dpll1_x2_ck",
1852 .max_div = 31,
1853 .reg = 0x944,
1854 .module = TI_CLKM_CM,
1855 .flags = CLKF_INDEX_STARTS_AT_ONE,
1856};
1857
1858static struct ti_clk dpll1_x2m2_ck = {
1859 .name = "dpll1_x2m2_ck",
1860 .type = TI_CLK_DIVIDER,
1861 .data = &dpll1_x2m2_ck_data,
1862};
1863
1864static struct ti_clk_fixed_factor mpu_ck_data = {
1865 .parent = "dpll1_x2m2_ck",
1866 .div = 1,
1867 .mult = 1,
1868};
1869
1870static struct ti_clk mpu_ck = {
1871 .name = "mpu_ck",
1872 .type = TI_CLK_FIXED_FACTOR,
1873 .data = &mpu_ck_data,
1874};
1875
1876static struct ti_clk_divider arm_fck_data = {
1877 .parent = "mpu_ck",
1878 .max_div = 2,
1879 .reg = 0x924,
1880 .module = TI_CLKM_CM,
1881};
1882
1883static struct ti_clk arm_fck = {
1884 .name = "arm_fck",
1885 .type = TI_CLK_DIVIDER,
1886 .data = &arm_fck_data,
1887};
1888
1889static struct ti_clk_fixed_factor core_d3_ck_data = {
1890 .parent = "core_ck",
1891 .div = 3,
1892 .mult = 1,
1893};
1894
1895static struct ti_clk core_d3_ck = {
1896 .name = "core_d3_ck",
1897 .type = TI_CLK_FIXED_FACTOR,
1898 .data = &core_d3_ck_data,
1899};
1900
1901static struct ti_clk_gate gpt11_gate_fck_data = {
1902 .parent = "sys_ck",
1903 .bit_shift = 12,
1904 .reg = 0xa00,
1905 .module = TI_CLKM_CM,
1906};
1907
1908static const char *gpt11_mux_fck_parents[] = {
1909 "omap_32k_fck",
1910 "sys_ck",
1911};
1912
1913static struct ti_clk_mux gpt11_mux_fck_data = {
1914 .bit_shift = 7,
1915 .num_parents = ARRAY_SIZE(gpt11_mux_fck_parents),
1916 .reg = 0xa40,
1917 .module = TI_CLKM_CM,
1918 .parents = gpt11_mux_fck_parents,
1919};
1920
1921static struct ti_clk_composite gpt11_fck_data = {
1922 .mux = &gpt11_mux_fck_data,
1923 .gate = &gpt11_gate_fck_data,
1924};
1925
1926static struct ti_clk gpt11_fck = {
1927 .name = "gpt11_fck",
1928 .type = TI_CLK_COMPOSITE,
1929 .data = &gpt11_fck_data,
1930};
1931
1932static struct ti_clk_fixed_factor core_d6_ck_data = {
1933 .parent = "core_ck",
1934 .div = 6,
1935 .mult = 1,
1936};
1937
1938static struct ti_clk core_d6_ck = {
1939 .name = "core_d6_ck",
1940 .type = TI_CLK_FIXED_FACTOR,
1941 .data = &core_d6_ck_data,
1942};
1943
1944static struct ti_clk_gate uart4_fck_am35xx_data = {
1945 .parent = "core_48m_fck",
1946 .bit_shift = 23,
1947 .reg = 0xa00,
1948 .module = TI_CLKM_CM,
1949 .flags = CLKF_WAIT,
1950};
1951
1952static struct ti_clk uart4_fck_am35xx = {
1953 .name = "uart4_fck_am35xx",
1954 .clkdm_name = "core_l4_clkdm",
1955 .type = TI_CLK_GATE,
1956 .data = &uart4_fck_am35xx_data,
1957};
1958
1959static struct ti_clk_gate dpll3_m3x2_ck_data = {
1960 .parent = "dpll3_m3x2_mul_ck",
1961 .bit_shift = 0xc,
1962 .reg = 0xd00,
1963 .module = TI_CLKM_CM,
1964 .flags = CLKF_SET_BIT_TO_DISABLE,
1965};
1966
1967static struct ti_clk dpll3_m3x2_ck = {
1968 .name = "dpll3_m3x2_ck",
1969 .type = TI_CLK_GATE,
1970 .data = &dpll3_m3x2_ck_data,
1971};
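/*
 * CLKF_SET_BIT_TO_DISABLE marks an inverted gate: the bit in the
 * 0xd00 register (CM_CLKEN_PLL, where these appear to be the DPLL
 * output power-down controls) stops the x2 output when set and
 * enables it when cleared.
 */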
1972
1973static struct ti_clk_fixed_factor emu_core_alwon_ck_data = {
1974 .parent = "dpll3_m3x2_ck",
1975 .div = 1,
1976 .mult = 1,
1977};
1978
1979static struct ti_clk emu_core_alwon_ck = {
1980 .name = "emu_core_alwon_ck",
1981 .type = TI_CLK_FIXED_FACTOR,
1982 .data = &emu_core_alwon_ck_data,
1983};
1984
1985static struct ti_clk_divider dpll4_m6_ck_data = {
1986 .parent = "dpll4_ck",
1987 .bit_shift = 24,
1988 .max_div = 63,
1989 .reg = 0x1140,
1990 .module = TI_CLKM_CM,
1991 .flags = CLKF_INDEX_STARTS_AT_ONE,
1992};
1993
1994static struct ti_clk dpll4_m6_ck = {
1995 .name = "dpll4_m6_ck",
1996 .type = TI_CLK_DIVIDER,
1997 .data = &dpll4_m6_ck_data,
1998};
1999
2000static struct ti_clk_fixed_factor dpll4_m6x2_mul_ck_data = {
2001 .parent = "dpll4_m6_ck",
2002 .div = 1,
2003 .mult = 2,
2004};
2005
2006static struct ti_clk dpll4_m6x2_mul_ck = {
2007 .name = "dpll4_m6x2_mul_ck",
2008 .type = TI_CLK_FIXED_FACTOR,
2009 .data = &dpll4_m6x2_mul_ck_data,
2010};
2011
2012static struct ti_clk_gate dpll4_m6x2_ck_data = {
2013 .parent = "dpll4_m6x2_mul_ck",
2014 .bit_shift = 0x1f,
2015 .reg = 0xd00,
2016 .module = TI_CLKM_CM,
2017 .flags = CLKF_SET_BIT_TO_DISABLE,
2018};
2019
2020static struct ti_clk dpll4_m6x2_ck = {
2021 .name = "dpll4_m6x2_ck",
2022 .type = TI_CLK_GATE,
2023 .data = &dpll4_m6x2_ck_data,
2024};
2025
2026static struct ti_clk_fixed_factor emu_per_alwon_ck_data = {
2027 .parent = "dpll4_m6x2_ck",
2028 .div = 1,
2029 .mult = 1,
2030};
2031
2032static struct ti_clk emu_per_alwon_ck = {
2033 .name = "emu_per_alwon_ck",
2034 .type = TI_CLK_FIXED_FACTOR,
2035 .data = &emu_per_alwon_ck_data,
2036};
2037
2038static struct ti_clk_fixed_factor emu_mpu_alwon_ck_data = {
2039 .parent = "mpu_ck",
2040 .div = 1,
2041 .mult = 1,
2042};
2043
2044static struct ti_clk emu_mpu_alwon_ck = {
2045 .name = "emu_mpu_alwon_ck",
2046 .type = TI_CLK_FIXED_FACTOR,
2047 .data = &emu_mpu_alwon_ck_data,
2048};
2049
2050static const char *emu_src_mux_ck_parents[] = {
2051 "sys_ck",
2052 "emu_core_alwon_ck",
2053 "emu_per_alwon_ck",
2054 "emu_mpu_alwon_ck",
2055};
2056
2057static struct ti_clk_mux emu_src_mux_ck_data = {
2058 .num_parents = ARRAY_SIZE(emu_src_mux_ck_parents),
2059 .reg = 0x1140,
2060 .module = TI_CLKM_CM,
2061 .parents = emu_src_mux_ck_parents,
2062};
2063
2064static struct ti_clk emu_src_mux_ck = {
2065 .name = "emu_src_mux_ck",
2066 .type = TI_CLK_MUX,
2067 .data = &emu_src_mux_ck_data,
2068};
2069
2070static struct ti_clk_gate emu_src_ck_data = {
2071 .parent = "emu_src_mux_ck",
2072 .flags = CLKF_CLKDM,
2073};
2074
2075static struct ti_clk emu_src_ck = {
2076 .name = "emu_src_ck",
2077 .clkdm_name = "emu_clkdm",
2078 .type = TI_CLK_GATE,
2079 .data = &emu_src_ck_data,
2080};
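/*
 * emu_src_ck has no gate bit of its own: CLKF_CLKDM means enable and
 * disable are done purely by keeping the owning clockdomain
 * ("emu_clkdm") active, which is why no .reg/.bit_shift is given.
 */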
2081
2082static struct ti_clk_divider atclk_fck_data = {
2083 .parent = "emu_src_ck",
2084 .bit_shift = 4,
2085 .max_div = 3,
2086 .reg = 0x1140,
2087 .module = TI_CLKM_CM,
2088 .flags = CLKF_INDEX_STARTS_AT_ONE,
2089};
2090
2091static struct ti_clk atclk_fck = {
2092 .name = "atclk_fck",
2093 .type = TI_CLK_DIVIDER,
2094 .data = &atclk_fck_data,
2095};
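/*
 * For TI_CLK_DIVIDER entries, CLKF_INDEX_STARTS_AT_ONE means the
 * register field is one-based: a field value of N selects divide-by-N
 * and 0 is invalid (the common clock framework calls this
 * CLK_DIVIDER_ONE_BASED).
 */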
2096
2097static struct ti_clk_gate ipss_ick_data = {
2098 .parent = "core_l3_ick",
2099 .bit_shift = 4,
2100 .reg = 0xa10,
2101 .module = TI_CLKM_CM,
2102 .flags = CLKF_AM35XX | CLKF_INTERFACE,
2103};
2104
2105static struct ti_clk ipss_ick = {
2106 .name = "ipss_ick",
2107 .clkdm_name = "core_l3_clkdm",
2108 .type = TI_CLK_GATE,
2109 .data = &ipss_ick_data,
2110};
2111
2112static struct ti_clk_gate emac_ick_data = {
2113 .parent = "ipss_ick",
2114 .bit_shift = 1,
2115 .reg = 0x59c,
2116 .module = TI_CLKM_SCRM,
2117 .flags = CLKF_AM35XX,
2118};
2119
2120static struct ti_clk emac_ick = {
2121 .name = "emac_ick",
2122 .clkdm_name = "core_l3_clkdm",
2123 .type = TI_CLK_GATE,
2124 .data = &emac_ick_data,
2125};
2126
2127static struct ti_clk_gate vpfe_ick_data = {
2128 .parent = "ipss_ick",
2129 .bit_shift = 2,
2130 .reg = 0x59c,
2131 .module = TI_CLKM_SCRM,
2132 .flags = CLKF_AM35XX,
2133};
2134
2135static struct ti_clk vpfe_ick = {
2136 .name = "vpfe_ick",
2137 .clkdm_name = "core_l3_clkdm",
2138 .type = TI_CLK_GATE,
2139 .data = &vpfe_ick_data,
2140};
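/*
 * The CLKF_AM35XX gates live in the System Control Module
 * (TI_CLKM_SCRM) rather than in CM: register 0x59c gates the AM35xx
 * IPSS peripherals (EMAC, VPFE, HECC, USB OTG). This is assumed to be
 * the CONTROL_IPSS_CLK_CTRL register described in the AM35xx TRM.
 */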
2141
2142static const char *dpll2_ck_parents[] = {
2143 "sys_ck",
2144 "dpll2_fck",
2145};
2146
2147static struct ti_clk_dpll dpll2_ck_data = {
2148 .num_parents = ARRAY_SIZE(dpll2_ck_parents),
2149 .control_reg = 0x4,
2150 .idlest_reg = 0x24,
2151 .mult_div1_reg = 0x40,
2152 .autoidle_reg = 0x34,
2153 .module = TI_CLKM_CM,
2154 .parents = dpll2_ck_parents,
2155 .freqsel_mask = 0xf0,
2156 .modes = 0xa2,
2157 .div1_mask = 0x7f,
2158 .idlest_mask = 0x1,
2159 .auto_recal_bit = 0x3,
2160 .max_divider = 0x80,
2161 .min_divider = 0x1,
2162 .recal_en_bit = 0x8,
2163 .max_multiplier = 0x7ff,
2164 .enable_mask = 0x7,
2165 .mult_mask = 0x7ff00,
2166 .recal_st_bit = 0x8,
2167 .autoidle_mask = 0x7,
2168};
2169
2170static struct ti_clk dpll2_ck = {
2171 .name = "dpll2_ck",
2172 .clkdm_name = "dpll2_clkdm",
2173 .type = TI_CLK_DPLL,
2174 .data = &dpll2_ck_data,
2175};
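/*
 * The ti_clk_dpll register fields (.control_reg, .idlest_reg,
 * .mult_div1_reg, .autoidle_reg) are offsets within the CM module for
 * the IVA2 DPLL; .mult_mask/.div1_mask locate the M and N fields, and
 * .modes is a bitmask of the enable-mode values this DPLL accepts.
 */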
2176
2177static struct ti_clk_divider dpll2_m2_ck_data = {
2178 .parent = "dpll2_ck",
2179 .max_div = 31,
2180 .reg = 0x44,
2181 .module = TI_CLKM_CM,
2182 .flags = CLKF_INDEX_STARTS_AT_ONE,
2183};
2184
2185static struct ti_clk dpll2_m2_ck = {
2186 .name = "dpll2_m2_ck",
2187 .type = TI_CLK_DIVIDER,
2188 .data = &dpll2_m2_ck_data,
2189};
2190
2191static const char *mcbsp4_mux_fck_parents[] = {
2192 "per_96m_fck",
2193 "mcbsp_clks",
2194};
2195
2196static struct ti_clk_mux mcbsp4_mux_fck_data = {
2197 .bit_shift = 2,
2198 .num_parents = ARRAY_SIZE(mcbsp4_mux_fck_parents),
2199 .reg = 0x2d8,
2200 .module = TI_CLKM_SCRM,
2201 .parents = mcbsp4_mux_fck_parents,
2202};
2203
2204static const char *mcbsp1_mux_fck_parents[] = {
2205 "core_96m_fck",
2206 "mcbsp_clks",
2207};
2208
2209static struct ti_clk_mux mcbsp1_mux_fck_data = {
2210 .bit_shift = 2,
2211 .num_parents = ARRAY_SIZE(mcbsp1_mux_fck_parents),
2212 .reg = 0x274,
2213 .module = TI_CLKM_SCRM,
2214 .parents = mcbsp1_mux_fck_parents,
2215};
2216
2217static struct ti_clk_gate gpt8_gate_fck_data = {
2218 .parent = "sys_ck",
2219 .bit_shift = 9,
2220 .reg = 0x1000,
2221 .module = TI_CLKM_CM,
2222};
2223
2224static struct ti_clk_gate gpt8_ick_data = {
2225 .parent = "per_l4_ick",
2226 .bit_shift = 9,
2227 .reg = 0x1010,
2228 .module = TI_CLKM_CM,
2229 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
2230};
2231
2232static struct ti_clk gpt8_ick = {
2233 .name = "gpt8_ick",
2234 .clkdm_name = "per_clkdm",
2235 .type = TI_CLK_GATE,
2236 .data = &gpt8_ick_data,
2237};
2238
2239static const char *gpt10_mux_fck_parents[] = {
2240 "omap_32k_fck",
2241 "sys_ck",
2242};
2243
2244static struct ti_clk_mux gpt10_mux_fck_data = {
2245 .bit_shift = 6,
2246 .num_parents = ARRAY_SIZE(gpt10_mux_fck_parents),
2247 .reg = 0xa40,
2248 .module = TI_CLKM_CM,
2249 .parents = gpt10_mux_fck_parents,
2250};
2251
2252static struct ti_clk_gate mmchs3_ick_data = {
2253 .parent = "core_l4_ick",
2254 .bit_shift = 30,
2255 .reg = 0xa10,
2256 .module = TI_CLKM_CM,
2257 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
2258};
2259
2260static struct ti_clk mmchs3_ick = {
2261 .name = "mmchs3_ick",
2262 .clkdm_name = "core_l4_clkdm",
2263 .type = TI_CLK_GATE,
2264 .data = &mmchs3_ick_data,
2265};
2266
2267static struct ti_clk_gate gpio3_ick_data = {
2268 .parent = "per_l4_ick",
2269 .bit_shift = 14,
2270 .reg = 0x1010,
2271 .module = TI_CLKM_CM,
2272 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
2273};
2274
2275static struct ti_clk gpio3_ick = {
2276 .name = "gpio3_ick",
2277 .clkdm_name = "per_clkdm",
2278 .type = TI_CLK_GATE,
2279 .data = &gpio3_ick_data,
2280};
2281
2282static const char *traceclk_src_fck_parents[] = {
2283 "sys_ck",
2284 "emu_core_alwon_ck",
2285 "emu_per_alwon_ck",
2286 "emu_mpu_alwon_ck",
2287};
2288
2289static struct ti_clk_mux traceclk_src_fck_data = {
2290 .bit_shift = 2,
2291 .num_parents = ARRAY_SIZE(traceclk_src_fck_parents),
2292 .reg = 0x1140,
2293 .module = TI_CLKM_CM,
2294 .parents = traceclk_src_fck_parents,
2295};
2296
2297static struct ti_clk traceclk_src_fck = {
2298 .name = "traceclk_src_fck",
2299 .type = TI_CLK_MUX,
2300 .data = &traceclk_src_fck_data,
2301};
2302
2303static struct ti_clk_divider traceclk_fck_data = {
2304 .parent = "traceclk_src_fck",
2305 .bit_shift = 11,
2306 .max_div = 7,
2307 .reg = 0x1140,
2308 .module = TI_CLKM_CM,
2309 .flags = CLKF_INDEX_STARTS_AT_ONE,
2310};
2311
2312static struct ti_clk traceclk_fck = {
2313 .name = "traceclk_fck",
2314 .type = TI_CLK_DIVIDER,
2315 .data = &traceclk_fck_data,
2316};
2317
2318static struct ti_clk_gate mcbsp5_gate_fck_data = {
2319 .parent = "mcbsp_clks",
2320 .bit_shift = 10,
2321 .reg = 0xa00,
2322 .module = TI_CLKM_CM,
2323};
2324
2325static struct ti_clk_gate sad2d_ick_data = {
2326 .parent = "l3_ick",
2327 .bit_shift = 3,
2328 .reg = 0xa10,
2329 .module = TI_CLKM_CM,
2330 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
2331};
2332
2333static struct ti_clk sad2d_ick = {
2334 .name = "sad2d_ick",
2335 .clkdm_name = "d2d_clkdm",
2336 .type = TI_CLK_GATE,
2337 .data = &sad2d_ick_data,
2338};
2339
2340static const char *gpt1_mux_fck_parents[] = {
2341 "omap_32k_fck",
2342 "sys_ck",
2343};
2344
2345static struct ti_clk_mux gpt1_mux_fck_data = {
2346 .num_parents = ARRAY_SIZE(gpt1_mux_fck_parents),
2347 .reg = 0xc40,
2348 .module = TI_CLKM_CM,
2349 .parents = gpt1_mux_fck_parents,
2350};
2351
2352static struct ti_clk_gate hecc_ck_data = {
2353 .parent = "sys_ck",
2354 .bit_shift = 3,
2355 .reg = 0x59c,
2356 .module = TI_CLKM_SCRM,
2357 .flags = CLKF_AM35XX,
2358};
2359
2360static struct ti_clk hecc_ck = {
2361 .name = "hecc_ck",
2362 .clkdm_name = "core_l3_clkdm",
2363 .type = TI_CLK_GATE,
2364 .data = &hecc_ck_data,
2365};
2366
2367static struct ti_clk_gate gpt1_gate_fck_data = {
2368 .parent = "sys_ck",
2369 .bit_shift = 0,
2370 .reg = 0xc00,
2371 .module = TI_CLKM_CM,
2372};
2373
2374static struct ti_clk_composite gpt1_fck_data = {
2375 .mux = &gpt1_mux_fck_data,
2376 .gate = &gpt1_gate_fck_data,
2377};
2378
2379static struct ti_clk gpt1_fck = {
2380 .name = "gpt1_fck",
2381 .type = TI_CLK_COMPOSITE,
2382 .data = &gpt1_fck_data,
2383};
2384
2385static struct ti_clk_gate dpll4_m2x2_ck_omap36xx_data = {
2386 .parent = "dpll4_m2x2_mul_ck",
2387 .bit_shift = 0x1b,
2388 .reg = 0xd00,
2389 .module = TI_CLKM_CM,
2390 .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
2391};
2392
2393static struct ti_clk dpll4_m2x2_ck_omap36xx = {
2394 .name = "dpll4_m2x2_ck",
2395 .type = TI_CLK_GATE,
2396 .data = &dpll4_m2x2_ck_omap36xx_data,
2397 .patch = &dpll4_m2x2_ck,
2398};
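/*
 * The _omap36xx variants are not registered as extra clocks: the
 * .patch pointer makes the init code overwrite the matching base
 * clock's data (here dpll4_m2x2_ck) when an OMAP36xx is detected, so
 * the 36xx-only CLKF_HSDIV handling is picked up.
 */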
2399
2400static struct ti_clk_divider gfx_l3_fck_data = {
2401 .parent = "l3_ick",
2402 .max_div = 7,
2403 .reg = 0xb40,
2404 .module = TI_CLKM_CM,
2405 .flags = CLKF_INDEX_STARTS_AT_ONE,
2406};
2407
2408static struct ti_clk gfx_l3_fck = {
2409 .name = "gfx_l3_fck",
2410 .type = TI_CLK_DIVIDER,
2411 .data = &gfx_l3_fck_data,
2412};
2413
2414static struct ti_clk_gate gfx_cg1_ck_data = {
2415 .parent = "gfx_l3_fck",
2416 .bit_shift = 1,
2417 .reg = 0xb00,
2418 .module = TI_CLKM_CM,
2419 .flags = CLKF_WAIT,
2420};
2421
2422static struct ti_clk gfx_cg1_ck = {
2423 .name = "gfx_cg1_ck",
2424 .clkdm_name = "gfx_3430es1_clkdm",
2425 .type = TI_CLK_GATE,
2426 .data = &gfx_cg1_ck_data,
2427};
2428
2429static struct ti_clk_gate mailboxes_ick_data = {
2430 .parent = "core_l4_ick",
2431 .bit_shift = 7,
2432 .reg = 0xa10,
2433 .module = TI_CLKM_CM,
2434 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
2435};
2436
2437static struct ti_clk mailboxes_ick = {
2438 .name = "mailboxes_ick",
2439 .clkdm_name = "core_l4_clkdm",
2440 .type = TI_CLK_GATE,
2441 .data = &mailboxes_ick_data,
2442};
2443
2444static struct ti_clk_gate sha11_ick_data = {
2445 .parent = "security_l4_ick2",
2446 .bit_shift = 1,
2447 .reg = 0xa14,
2448 .module = TI_CLKM_CM,
2449 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
2450};
2451
2452static struct ti_clk sha11_ick = {
2453 .name = "sha11_ick",
2454 .type = TI_CLK_GATE,
2455 .data = &sha11_ick_data,
2456};
2457
2458static struct ti_clk_gate hsotgusb_ick_am35xx_data = {
2459 .parent = "ipss_ick",
2460 .bit_shift = 0,
2461 .reg = 0x59c,
2462 .module = TI_CLKM_SCRM,
2463 .flags = CLKF_AM35XX,
2464};
2465
2466static struct ti_clk hsotgusb_ick_am35xx = {
2467 .name = "hsotgusb_ick_am35xx",
2468 .clkdm_name = "core_l3_clkdm",
2469 .type = TI_CLK_GATE,
2470 .data = &hsotgusb_ick_am35xx_data,
2471};
2472
2473static struct ti_clk_gate mmchs3_fck_data = {
2474 .parent = "core_96m_fck",
2475 .bit_shift = 30,
2476 .reg = 0xa00,
2477 .module = TI_CLKM_CM,
2478 .flags = CLKF_WAIT,
2479};
2480
2481static struct ti_clk mmchs3_fck = {
2482 .name = "mmchs3_fck",
2483 .clkdm_name = "core_l4_clkdm",
2484 .type = TI_CLK_GATE,
2485 .data = &mmchs3_fck_data,
2486};
2487
2488static struct ti_clk_divider pclk_fck_data = {
2489 .parent = "emu_src_ck",
2490 .bit_shift = 8,
2491 .max_div = 7,
2492 .reg = 0x1140,
2493 .module = TI_CLKM_CM,
2494 .flags = CLKF_INDEX_STARTS_AT_ONE,
2495};
2496
2497static struct ti_clk pclk_fck = {
2498 .name = "pclk_fck",
2499 .type = TI_CLK_DIVIDER,
2500 .data = &pclk_fck_data,
2501};
2502
2503static const char *dpll4_ck_omap36xx_parents[] = {
2504 "sys_ck",
2505 "sys_ck",
2506};
2507
2508static struct ti_clk_dpll dpll4_ck_omap36xx_data = {
2509 .num_parents = ARRAY_SIZE(dpll4_ck_omap36xx_parents),
2510 .control_reg = 0xd00,
2511 .idlest_reg = 0xd20,
2512 .mult_div1_reg = 0xd44,
2513 .autoidle_reg = 0xd30,
2514 .module = TI_CLKM_CM,
2515 .parents = dpll4_ck_omap36xx_parents,
2516 .modes = 0x82,
2517 .div1_mask = 0x7f,
2518 .idlest_mask = 0x2,
2519 .auto_recal_bit = 0x13,
2520 .max_divider = 0x80,
2521 .min_divider = 0x1,
2522 .recal_en_bit = 0x6,
2523 .max_multiplier = 0xfff,
2524 .enable_mask = 0x70000,
2525 .mult_mask = 0xfff00,
2526 .recal_st_bit = 0x6,
2527 .autoidle_mask = 0x38,
2528 .sddiv_mask = 0xff000000,
2529 .dco_mask = 0xe00000,
2530 .flags = CLKF_PER | CLKF_J_TYPE,
2531};
2532
2533static struct ti_clk dpll4_ck_omap36xx = {
2534 .name = "dpll4_ck",
2535 .type = TI_CLK_DPLL,
2536 .data = &dpll4_ck_omap36xx_data,
2537 .patch = &dpll4_ck,
2538};
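/*
 * On OMAP36xx, DPLL4 is a j-type DPLL (CLKF_J_TYPE): it carries the
 * extra sigma-delta divider and DCO selection fields (.sddiv_mask,
 * .dco_mask) that the 3430 version lacks, hence this patched variant.
 */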
2539
2540static struct ti_clk_gate uart3_fck_data = {
2541 .parent = "per_48m_fck",
2542 .bit_shift = 11,
2543 .reg = 0x1000,
2544 .module = TI_CLKM_CM,
2545 .flags = CLKF_WAIT,
2546};
2547
2548static struct ti_clk uart3_fck = {
2549 .name = "uart3_fck",
2550 .clkdm_name = "per_clkdm",
2551 .type = TI_CLK_GATE,
2552 .data = &uart3_fck_data,
2553};
2554
2555static struct ti_clk_fixed_factor wkup_32k_fck_data = {
2556 .parent = "omap_32k_fck",
2557 .div = 1,
2558 .mult = 1,
2559};
2560
2561static struct ti_clk wkup_32k_fck = {
2562 .name = "wkup_32k_fck",
2563 .type = TI_CLK_FIXED_FACTOR,
2564 .data = &wkup_32k_fck_data,
2565};
2566
2567static struct ti_clk_gate sys_clkout1_data = {
2568 .parent = "osc_sys_ck",
2569 .bit_shift = 7,
2570 .reg = 0xd70,
2571 .module = TI_CLKM_PRM,
2572};
2573
2574static struct ti_clk sys_clkout1 = {
2575 .name = "sys_clkout1",
2576 .type = TI_CLK_GATE,
2577 .data = &sys_clkout1_data,
2578};
2579
2580static struct ti_clk_fixed_factor gpmc_fck_data = {
2581 .parent = "core_l3_ick",
2582 .div = 1,
2583 .mult = 1,
2584};
2585
2586static struct ti_clk gpmc_fck = {
2587 .name = "gpmc_fck",
2588 .type = TI_CLK_FIXED_FACTOR,
2589 .data = &gpmc_fck_data,
2590};
2591
2592static struct ti_clk_fixed_factor dpll5_m2_d20_ck_data = {
2593 .parent = "dpll5_m2_ck",
2594 .div = 20,
2595 .mult = 1,
2596};
2597
2598static struct ti_clk dpll5_m2_d20_ck = {
2599 .name = "dpll5_m2_d20_ck",
2600 .type = TI_CLK_FIXED_FACTOR,
2601 .data = &dpll5_m2_d20_ck_data,
2602};
2603
2604static struct ti_clk_gate dpll4_m5x2_ck_omap36xx_data = {
2605 .parent = "dpll4_m5x2_mul_ck",
2606 .bit_shift = 0x1e,
2607 .reg = 0xd00,
2608 .module = TI_CLKM_CM,
2609 .flags = CLKF_HSDIV | CLKF_SET_RATE_PARENT | CLKF_SET_BIT_TO_DISABLE,
2610};
2611
2612static struct ti_clk dpll4_m5x2_ck_omap36xx = {
2613 .name = "dpll4_m5x2_ck",
2614 .type = TI_CLK_GATE,
2615 .data = &dpll4_m5x2_ck_omap36xx_data,
2616 .patch = &dpll4_m5x2_ck,
2617};
2618
2619static struct ti_clk_gate ssi_ssr_gate_fck_3430es2_data = {
2620 .parent = "corex2_fck",
2621 .bit_shift = 0,
2622 .reg = 0xa00,
2623 .module = TI_CLKM_CM,
2624 .flags = CLKF_NO_WAIT,
2625};
2626
2627static struct ti_clk_gate uart1_ick_data = {
2628 .parent = "core_l4_ick",
2629 .bit_shift = 13,
2630 .reg = 0xa10,
2631 .module = TI_CLKM_CM,
2632 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
2633};
2634
2635static struct ti_clk uart1_ick = {
2636 .name = "uart1_ick",
2637 .clkdm_name = "core_l4_clkdm",
2638 .type = TI_CLK_GATE,
2639 .data = &uart1_ick_data,
2640};
2641
2642static struct ti_clk_gate iva2_ck_data = {
2643 .parent = "dpll2_m2_ck",
2644 .bit_shift = 0,
2645 .reg = 0x0,
2646 .module = TI_CLKM_CM,
2647 .flags = CLKF_WAIT,
2648};
2649
2650static struct ti_clk iva2_ck = {
2651 .name = "iva2_ck",
2652 .clkdm_name = "iva2_clkdm",
2653 .type = TI_CLK_GATE,
2654 .data = &iva2_ck_data,
2655};
2656
2657static struct ti_clk_gate pka_ick_data = {
2658 .parent = "security_l3_ick",
2659 .bit_shift = 4,
2660 .reg = 0xa14,
2661 .module = TI_CLKM_CM,
2662 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
2663};
2664
2665static struct ti_clk pka_ick = {
2666 .name = "pka_ick",
2667 .type = TI_CLK_GATE,
2668 .data = &pka_ick_data,
2669};
2670
2671static struct ti_clk_gate gpt12_ick_data = {
2672 .parent = "wkup_l4_ick",
2673 .bit_shift = 1,
2674 .reg = 0xc10,
2675 .module = TI_CLKM_CM,
2676 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
2677};
2678
2679static struct ti_clk gpt12_ick = {
2680 .name = "gpt12_ick",
2681 .clkdm_name = "wkup_clkdm",
2682 .type = TI_CLK_GATE,
2683 .data = &gpt12_ick_data,
2684};
2685
2686static const char *mcbsp5_mux_fck_parents[] = {
2687 "core_96m_fck",
2688 "mcbsp_clks",
2689};
2690
2691static struct ti_clk_mux mcbsp5_mux_fck_data = {
2692 .bit_shift = 4,
2693 .num_parents = ARRAY_SIZE(mcbsp5_mux_fck_parents),
2694 .reg = 0x2d8,
2695 .module = TI_CLKM_SCRM,
2696 .parents = mcbsp5_mux_fck_parents,
2697};
2698
2699static struct ti_clk_composite mcbsp5_fck_data = {
2700 .mux = &mcbsp5_mux_fck_data,
2701 .gate = &mcbsp5_gate_fck_data,
2702};
2703
2704static struct ti_clk mcbsp5_fck = {
2705 .name = "mcbsp5_fck",
2706 .type = TI_CLK_COMPOSITE,
2707 .data = &mcbsp5_fck_data,
2708};
2709
2710static struct ti_clk_gate usbhost_48m_fck_data = {
2711 .parent = "omap_48m_fck",
2712 .bit_shift = 0,
2713 .reg = 0x1400,
2714 .module = TI_CLKM_CM,
2715 .flags = CLKF_DSS,
2716};
2717
2718static struct ti_clk usbhost_48m_fck = {
2719 .name = "usbhost_48m_fck",
2720 .clkdm_name = "usbhost_clkdm",
2721 .type = TI_CLK_GATE,
2722 .data = &usbhost_48m_fck_data,
2723};
2724
2725static struct ti_clk_gate des1_ick_data = {
2726 .parent = "security_l4_ick2",
2727 .bit_shift = 0,
2728 .reg = 0xa14,
2729 .module = TI_CLKM_CM,
2730 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
2731};
2732
2733static struct ti_clk des1_ick = {
2734 .name = "des1_ick",
2735 .type = TI_CLK_GATE,
2736 .data = &des1_ick_data,
2737};
2738
2739static struct ti_clk_gate sgx_gate_fck_data = {
2740 .parent = "core_ck",
2741 .bit_shift = 1,
2742 .reg = 0xb00,
2743 .module = TI_CLKM_CM,
2744};
2745
2746static struct ti_clk_fixed_factor core_d4_ck_data = {
2747 .parent = "core_ck",
2748 .div = 4,
2749 .mult = 1,
2750};
2751
2752static struct ti_clk core_d4_ck = {
2753 .name = "core_d4_ck",
2754 .type = TI_CLK_FIXED_FACTOR,
2755 .data = &core_d4_ck_data,
2756};
2757
2758static struct ti_clk_fixed_factor omap_192m_alwon_fck_data = {
2759 .parent = "dpll4_m2x2_ck",
2760 .div = 1,
2761 .mult = 1,
2762};
2763
2764static struct ti_clk omap_192m_alwon_fck = {
2765 .name = "omap_192m_alwon_fck",
2766 .type = TI_CLK_FIXED_FACTOR,
2767 .data = &omap_192m_alwon_fck_data,
2768};
2769
2770static struct ti_clk_fixed_factor core_d2_ck_data = {
2771 .parent = "core_ck",
2772 .div = 2,
2773 .mult = 1,
2774};
2775
2776static struct ti_clk core_d2_ck = {
2777 .name = "core_d2_ck",
2778 .type = TI_CLK_FIXED_FACTOR,
2779 .data = &core_d2_ck_data,
2780};
2781
2782static struct ti_clk_fixed_factor corex2_d3_fck_data = {
2783 .parent = "corex2_fck",
2784 .div = 3,
2785 .mult = 1,
2786};
2787
2788static struct ti_clk corex2_d3_fck = {
2789 .name = "corex2_d3_fck",
2790 .type = TI_CLK_FIXED_FACTOR,
2791 .data = &corex2_d3_fck_data,
2792};
2793
2794static struct ti_clk_fixed_factor corex2_d5_fck_data = {
2795 .parent = "corex2_fck",
2796 .div = 5,
2797 .mult = 1,
2798};
2799
2800static struct ti_clk corex2_d5_fck = {
2801 .name = "corex2_d5_fck",
2802 .type = TI_CLK_FIXED_FACTOR,
2803 .data = &corex2_d5_fck_data,
2804};
2805
2806static const char *sgx_mux_fck_parents[] = {
2807 "core_d3_ck",
2808 "core_d4_ck",
2809 "core_d6_ck",
2810 "cm_96m_fck",
2811 "omap_192m_alwon_fck",
2812 "core_d2_ck",
2813 "corex2_d3_fck",
2814 "corex2_d5_fck",
2815};
2816
2817static struct ti_clk_mux sgx_mux_fck_data = {
2818 .num_parents = ARRAY_SIZE(sgx_mux_fck_parents),
2819 .reg = 0xb40,
2820 .module = TI_CLKM_CM,
2821 .parents = sgx_mux_fck_parents,
2822};
2823
2824static struct ti_clk_composite sgx_fck_data = {
2825 .mux = &sgx_mux_fck_data,
2826 .gate = &sgx_gate_fck_data,
2827};
2828
2829static struct ti_clk sgx_fck = {
2830 .name = "sgx_fck",
2831 .type = TI_CLK_COMPOSITE,
2832 .data = &sgx_fck_data,
2833};
2834
2835static struct ti_clk_gate mcspi1_fck_data = {
2836 .parent = "core_48m_fck",
2837 .bit_shift = 18,
2838 .reg = 0xa00,
2839 .module = TI_CLKM_CM,
2840 .flags = CLKF_WAIT,
2841};
2842
2843static struct ti_clk mcspi1_fck = {
2844 .name = "mcspi1_fck",
2845 .clkdm_name = "core_l4_clkdm",
2846 .type = TI_CLK_GATE,
2847 .data = &mcspi1_fck_data,
2848};
2849
2850static struct ti_clk_gate mmchs2_fck_data = {
2851 .parent = "core_96m_fck",
2852 .bit_shift = 25,
2853 .reg = 0xa00,
2854 .module = TI_CLKM_CM,
2855 .flags = CLKF_WAIT,
2856};
2857
2858static struct ti_clk mmchs2_fck = {
2859 .name = "mmchs2_fck",
2860 .clkdm_name = "core_l4_clkdm",
2861 .type = TI_CLK_GATE,
2862 .data = &mmchs2_fck_data,
2863};
2864
2865static struct ti_clk_gate mcspi2_fck_data = {
2866 .parent = "core_48m_fck",
2867 .bit_shift = 19,
2868 .reg = 0xa00,
2869 .module = TI_CLKM_CM,
2870 .flags = CLKF_WAIT,
2871};
2872
2873static struct ti_clk mcspi2_fck = {
2874 .name = "mcspi2_fck",
2875 .clkdm_name = "core_l4_clkdm",
2876 .type = TI_CLK_GATE,
2877 .data = &mcspi2_fck_data,
2878};
2879
2880static struct ti_clk_gate vpfe_fck_data = {
2881 .parent = "pclk_ck",
2882 .bit_shift = 10,
2883 .reg = 0x59c,
2884 .module = TI_CLKM_SCRM,
2885};
2886
2887static struct ti_clk vpfe_fck = {
2888 .name = "vpfe_fck",
2889 .type = TI_CLK_GATE,
2890 .data = &vpfe_fck_data,
2891};
2892
2893static struct ti_clk_gate gpt4_gate_fck_data = {
2894 .parent = "sys_ck",
2895 .bit_shift = 5,
2896 .reg = 0x1000,
2897 .module = TI_CLKM_CM,
2898};
2899
2900static struct ti_clk_gate mcbsp1_gate_fck_data = {
2901 .parent = "mcbsp_clks",
2902 .bit_shift = 9,
2903 .reg = 0xa00,
2904 .module = TI_CLKM_CM,
2905};
2906
2907static struct ti_clk_gate gpt5_gate_fck_data = {
2908 .parent = "sys_ck",
2909 .bit_shift = 6,
2910 .reg = 0x1000,
2911 .module = TI_CLKM_CM,
2912};
2913
2914static const char *gpt5_mux_fck_parents[] = {
2915 "omap_32k_fck",
2916 "sys_ck",
2917};
2918
2919static struct ti_clk_mux gpt5_mux_fck_data = {
2920 .bit_shift = 3,
2921 .num_parents = ARRAY_SIZE(gpt5_mux_fck_parents),
2922 .reg = 0x1040,
2923 .module = TI_CLKM_CM,
2924 .parents = gpt5_mux_fck_parents,
2925};
2926
2927static struct ti_clk_composite gpt5_fck_data = {
2928 .mux = &gpt5_mux_fck_data,
2929 .gate = &gpt5_gate_fck_data,
2930};
2931
2932static struct ti_clk gpt5_fck = {
2933 .name = "gpt5_fck",
2934 .type = TI_CLK_COMPOSITE,
2935 .data = &gpt5_fck_data,
2936};
2937
2938static struct ti_clk_gate ts_fck_data = {
2939 .parent = "omap_32k_fck",
2940 .bit_shift = 1,
2941 .reg = 0xa08,
2942 .module = TI_CLKM_CM,
2943};
2944
2945static struct ti_clk ts_fck = {
2946 .name = "ts_fck",
2947 .clkdm_name = "core_l4_clkdm",
2948 .type = TI_CLK_GATE,
2949 .data = &ts_fck_data,
2950};
2951
2952static struct ti_clk_fixed_factor wdt1_fck_data = {
2953 .parent = "secure_32k_fck",
2954 .div = 1,
2955 .mult = 1,
2956};
2957
2958static struct ti_clk wdt1_fck = {
2959 .name = "wdt1_fck",
2960 .type = TI_CLK_FIXED_FACTOR,
2961 .data = &wdt1_fck_data,
2962};
2963
2964static struct ti_clk_gate dpll4_m6x2_ck_omap36xx_data = {
2965 .parent = "dpll4_m6x2_mul_ck",
2966 .bit_shift = 0x1f,
2967 .reg = 0xd00,
2968 .module = TI_CLKM_CM,
2969 .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
2970};
2971
2972static struct ti_clk dpll4_m6x2_ck_omap36xx = {
2973 .name = "dpll4_m6x2_ck",
2974 .type = TI_CLK_GATE,
2975 .data = &dpll4_m6x2_ck_omap36xx_data,
2976 .patch = &dpll4_m6x2_ck,
2977};
2978
2979static const char *gpt4_mux_fck_parents[] = {
2980 "omap_32k_fck",
2981 "sys_ck",
2982};
2983
2984static struct ti_clk_mux gpt4_mux_fck_data = {
2985 .bit_shift = 2,
2986 .num_parents = ARRAY_SIZE(gpt4_mux_fck_parents),
2987 .reg = 0x1040,
2988 .module = TI_CLKM_CM,
2989 .parents = gpt4_mux_fck_parents,
2990};
2991
2992static struct ti_clk_gate usbhost_ick_data = {
2993 .parent = "l4_ick",
2994 .bit_shift = 0,
2995 .reg = 0x1410,
2996 .module = TI_CLKM_CM,
2997 .flags = CLKF_DSS | CLKF_OMAP3 | CLKF_INTERFACE,
2998};
2999
3000static struct ti_clk usbhost_ick = {
3001 .name = "usbhost_ick",
3002 .clkdm_name = "usbhost_clkdm",
3003 .type = TI_CLK_GATE,
3004 .data = &usbhost_ick_data,
3005};
3006
3007static struct ti_clk_gate mcbsp2_ick_data = {
3008 .parent = "per_l4_ick",
3009 .bit_shift = 0,
3010 .reg = 0x1010,
3011 .module = TI_CLKM_CM,
3012 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3013};
3014
3015static struct ti_clk mcbsp2_ick = {
3016 .name = "mcbsp2_ick",
3017 .clkdm_name = "per_clkdm",
3018 .type = TI_CLK_GATE,
3019 .data = &mcbsp2_ick_data,
3020};
3021
3022static struct ti_clk_gate omapctrl_ick_data = {
3023 .parent = "core_l4_ick",
3024 .bit_shift = 6,
3025 .reg = 0xa10,
3026 .module = TI_CLKM_CM,
3027 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3028};
3029
3030static struct ti_clk omapctrl_ick = {
3031 .name = "omapctrl_ick",
3032 .clkdm_name = "core_l4_clkdm",
3033 .type = TI_CLK_GATE,
3034 .data = &omapctrl_ick_data,
3035};
3036
3037static struct ti_clk_fixed_factor omap_96m_d4_fck_data = {
3038 .parent = "omap_96m_fck",
3039 .div = 4,
3040 .mult = 1,
3041};
3042
3043static struct ti_clk omap_96m_d4_fck = {
3044 .name = "omap_96m_d4_fck",
3045 .type = TI_CLK_FIXED_FACTOR,
3046 .data = &omap_96m_d4_fck_data,
3047};
3048
3049static struct ti_clk_gate gpt6_ick_data = {
3050 .parent = "per_l4_ick",
3051 .bit_shift = 7,
3052 .reg = 0x1010,
3053 .module = TI_CLKM_CM,
3054 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3055};
3056
3057static struct ti_clk gpt6_ick = {
3058 .name = "gpt6_ick",
3059 .clkdm_name = "per_clkdm",
3060 .type = TI_CLK_GATE,
3061 .data = &gpt6_ick_data,
3062};
3063
3064static struct ti_clk_gate dpll3_m3x2_ck_omap36xx_data = {
3065 .parent = "dpll3_m3x2_mul_ck",
3066 .bit_shift = 0xc,
3067 .reg = 0xd00,
3068 .module = TI_CLKM_CM,
3069 .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
3070};
3071
3072static struct ti_clk dpll3_m3x2_ck_omap36xx = {
3073 .name = "dpll3_m3x2_ck",
3074 .type = TI_CLK_GATE,
3075 .data = &dpll3_m3x2_ck_omap36xx_data,
3076 .patch = &dpll3_m3x2_ck,
3077};
3078
3079static struct ti_clk_gate i2c3_ick_data = {
3080 .parent = "core_l4_ick",
3081 .bit_shift = 17,
3082 .reg = 0xa10,
3083 .module = TI_CLKM_CM,
3084 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3085};
3086
3087static struct ti_clk i2c3_ick = {
3088 .name = "i2c3_ick",
3089 .clkdm_name = "core_l4_clkdm",
3090 .type = TI_CLK_GATE,
3091 .data = &i2c3_ick_data,
3092};
3093
3094static struct ti_clk_gate gpio6_ick_data = {
3095 .parent = "per_l4_ick",
3096 .bit_shift = 17,
3097 .reg = 0x1010,
3098 .module = TI_CLKM_CM,
3099 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3100};
3101
3102static struct ti_clk gpio6_ick = {
3103 .name = "gpio6_ick",
3104 .clkdm_name = "per_clkdm",
3105 .type = TI_CLK_GATE,
3106 .data = &gpio6_ick_data,
3107};
3108
3109static struct ti_clk_gate mspro_ick_data = {
3110 .parent = "core_l4_ick",
3111 .bit_shift = 23,
3112 .reg = 0xa10,
3113 .module = TI_CLKM_CM,
3114 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3115};
3116
3117static struct ti_clk mspro_ick = {
3118 .name = "mspro_ick",
3119 .clkdm_name = "core_l4_clkdm",
3120 .type = TI_CLK_GATE,
3121 .data = &mspro_ick_data,
3122};
3123
3124static struct ti_clk_composite mcbsp1_fck_data = {
3125 .mux = &mcbsp1_mux_fck_data,
3126 .gate = &mcbsp1_gate_fck_data,
3127};
3128
3129static struct ti_clk mcbsp1_fck = {
3130 .name = "mcbsp1_fck",
3131 .type = TI_CLK_COMPOSITE,
3132 .data = &mcbsp1_fck_data,
3133};
3134
3135static struct ti_clk_gate gpt3_gate_fck_data = {
3136 .parent = "sys_ck",
3137 .bit_shift = 4,
3138 .reg = 0x1000,
3139 .module = TI_CLKM_CM,
3140};
3141
3142static struct ti_clk_fixed rmii_ck_data = {
3143 .frequency = 50000000,
3144};
3145
3146static struct ti_clk rmii_ck = {
3147 .name = "rmii_ck",
3148 .type = TI_CLK_FIXED,
3149 .data = &rmii_ck_data,
3150};
3151
3152static struct ti_clk_gate gpt6_gate_fck_data = {
3153 .parent = "sys_ck",
3154 .bit_shift = 7,
3155 .reg = 0x1000,
3156 .module = TI_CLKM_CM,
3157};
3158
3159static struct ti_clk_composite gpt6_fck_data = {
3160 .mux = &gpt6_mux_fck_data,
3161 .gate = &gpt6_gate_fck_data,
3162};
3163
3164static struct ti_clk gpt6_fck = {
3165 .name = "gpt6_fck",
3166 .type = TI_CLK_COMPOSITE,
3167 .data = &gpt6_fck_data,
3168};
3169
3170static struct ti_clk_fixed_factor dpll5_m2_d4_ck_data = {
3171 .parent = "dpll5_m2_ck",
3172 .div = 4,
3173 .mult = 1,
3174};
3175
3176static struct ti_clk dpll5_m2_d4_ck = {
3177 .name = "dpll5_m2_d4_ck",
3178 .type = TI_CLK_FIXED_FACTOR,
3179 .data = &dpll5_m2_d4_ck_data,
3180};
3181
3182static struct ti_clk_fixed_factor sys_d2_ck_data = {
3183 .parent = "sys_ck",
3184 .div = 2,
3185 .mult = 1,
3186};
3187
3188static struct ti_clk sys_d2_ck = {
3189 .name = "sys_d2_ck",
3190 .type = TI_CLK_FIXED_FACTOR,
3191 .data = &sys_d2_ck_data,
3192};
3193
3194static struct ti_clk_fixed_factor omap_96m_d2_fck_data = {
3195 .parent = "omap_96m_fck",
3196 .div = 2,
3197 .mult = 1,
3198};
3199
3200static struct ti_clk omap_96m_d2_fck = {
3201 .name = "omap_96m_d2_fck",
3202 .type = TI_CLK_FIXED_FACTOR,
3203 .data = &omap_96m_d2_fck_data,
3204};
3205
3206static struct ti_clk_fixed_factor dpll5_m2_d8_ck_data = {
3207 .parent = "dpll5_m2_ck",
3208 .div = 8,
3209 .mult = 1,
3210};
3211
3212static struct ti_clk dpll5_m2_d8_ck = {
3213 .name = "dpll5_m2_d8_ck",
3214 .type = TI_CLK_FIXED_FACTOR,
3215 .data = &dpll5_m2_d8_ck_data,
3216};
3217
3218static struct ti_clk_fixed_factor dpll5_m2_d16_ck_data = {
3219 .parent = "dpll5_m2_ck",
3220 .div = 16,
3221 .mult = 1,
3222};
3223
3224static struct ti_clk dpll5_m2_d16_ck = {
3225 .name = "dpll5_m2_d16_ck",
3226 .type = TI_CLK_FIXED_FACTOR,
3227 .data = &dpll5_m2_d16_ck_data,
3228};
3229
3230static const char *usim_mux_fck_parents[] = {
3231 "sys_ck",
3232 "sys_d2_ck",
3233 "omap_96m_d2_fck",
3234 "omap_96m_d4_fck",
3235 "omap_96m_d8_fck",
3236 "omap_96m_d10_fck",
3237 "dpll5_m2_d4_ck",
3238 "dpll5_m2_d8_ck",
3239 "dpll5_m2_d16_ck",
3240 "dpll5_m2_d20_ck",
3241};
3242
3243static struct ti_clk_mux usim_mux_fck_data = {
3244 .bit_shift = 3,
3245 .num_parents = ARRAY_SIZE(usim_mux_fck_parents),
3246 .reg = 0xc40,
3247 .module = TI_CLKM_CM,
3248 .parents = usim_mux_fck_parents,
3249 .flags = CLKF_INDEX_STARTS_AT_ONE,
3250};
3251
3252static struct ti_clk_composite usim_fck_data = {
3253 .mux = &usim_mux_fck_data,
3254 .gate = &usim_gate_fck_data,
3255};
3256
3257static struct ti_clk usim_fck = {
3258 .name = "usim_fck",
3259 .type = TI_CLK_COMPOSITE,
3260 .data = &usim_fck_data,
3261};
3262
3263static int ssi_ssr_div_fck_3430es2_divs[] = {
3264 0,
3265 1,
3266 2,
3267 3,
3268 4,
3269 0,
3270 6,
3271 0,
3272 8,
3273};
3274
3275static struct ti_clk_divider ssi_ssr_div_fck_3430es2_data = {
3276 .num_dividers = ARRAY_SIZE(ssi_ssr_div_fck_3430es2_divs),
3277 .parent = "corex2_fck",
3278 .bit_shift = 8,
3279 .dividers = ssi_ssr_div_fck_3430es2_divs,
3280 .reg = 0xa40,
3281 .module = TI_CLKM_CM,
3282};
3283
3284static struct ti_clk_composite ssi_ssr_fck_3430es2_data = {
3285 .gate = &ssi_ssr_gate_fck_3430es2_data,
3286 .divider = &ssi_ssr_div_fck_3430es2_data,
3287};
3288
3289static struct ti_clk ssi_ssr_fck_3430es2 = {
3290 .name = "ssi_ssr_fck",
3291 .type = TI_CLK_COMPOSITE,
3292 .data = &ssi_ssr_fck_3430es2_data,
3293};
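/*
 * When a divider uses an explicit .dividers table, the array index is
 * the register field value and a 0 entry marks an unsupported
 * setting: SSI SSR only supports divide-by-1/2/3/4/6/8.
 */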
3294
3295static struct ti_clk_gate dss1_alwon_fck_3430es1_data = {
3296 .parent = "dpll4_m4x2_ck",
3297 .bit_shift = 0,
3298 .reg = 0xe00,
3299 .module = TI_CLKM_CM,
3300 .flags = CLKF_SET_RATE_PARENT,
3301};
3302
3303static struct ti_clk dss1_alwon_fck_3430es1 = {
3304 .name = "dss1_alwon_fck",
3305 .clkdm_name = "dss_clkdm",
3306 .type = TI_CLK_GATE,
3307 .data = &dss1_alwon_fck_3430es1_data,
3308};
3309
3310static struct ti_clk_gate gpt3_ick_data = {
3311 .parent = "per_l4_ick",
3312 .bit_shift = 4,
3313 .reg = 0x1010,
3314 .module = TI_CLKM_CM,
3315 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3316};
3317
3318static struct ti_clk gpt3_ick = {
3319 .name = "gpt3_ick",
3320 .clkdm_name = "per_clkdm",
3321 .type = TI_CLK_GATE,
3322 .data = &gpt3_ick_data,
3323};
3324
3325static struct ti_clk_fixed_factor omap_12m_fck_data = {
3326 .parent = "omap_48m_fck",
3327 .div = 4,
3328 .mult = 1,
3329};
3330
3331static struct ti_clk omap_12m_fck = {
3332 .name = "omap_12m_fck",
3333 .type = TI_CLK_FIXED_FACTOR,
3334 .data = &omap_12m_fck_data,
3335};
3336
3337static struct ti_clk_fixed_factor core_12m_fck_data = {
3338 .parent = "omap_12m_fck",
3339 .div = 1,
3340 .mult = 1,
3341};
3342
3343static struct ti_clk core_12m_fck = {
3344 .name = "core_12m_fck",
3345 .type = TI_CLK_FIXED_FACTOR,
3346 .data = &core_12m_fck_data,
3347};
3348
3349static struct ti_clk_gate hdq_fck_data = {
3350 .parent = "core_12m_fck",
3351 .bit_shift = 22,
3352 .reg = 0xa00,
3353 .module = TI_CLKM_CM,
3354 .flags = CLKF_WAIT,
3355};
3356
3357static struct ti_clk hdq_fck = {
3358 .name = "hdq_fck",
3359 .clkdm_name = "core_l4_clkdm",
3360 .type = TI_CLK_GATE,
3361 .data = &hdq_fck_data,
3362};
3363
3364static struct ti_clk_gate usbtll_fck_data = {
3365 .parent = "dpll5_m2_ck",
3366 .bit_shift = 2,
3367 .reg = 0xa08,
3368 .module = TI_CLKM_CM,
3369 .flags = CLKF_WAIT,
3370};
3371
3372static struct ti_clk usbtll_fck = {
3373 .name = "usbtll_fck",
3374 .clkdm_name = "core_l4_clkdm",
3375 .type = TI_CLK_GATE,
3376 .data = &usbtll_fck_data,
3377};
3378
3379static struct ti_clk_gate hsotgusb_fck_am35xx_data = {
3380 .parent = "sys_ck",
3381 .bit_shift = 8,
3382 .reg = 0x59c,
3383 .module = TI_CLKM_SCRM,
3384};
3385
3386static struct ti_clk hsotgusb_fck_am35xx = {
3387 .name = "hsotgusb_fck_am35xx",
3388 .clkdm_name = "core_l3_clkdm",
3389 .type = TI_CLK_GATE,
3390 .data = &hsotgusb_fck_am35xx_data,
3391};
3392
3393static struct ti_clk_gate hsotgusb_ick_3430es2_data = {
3394 .parent = "core_l3_ick",
3395 .bit_shift = 4,
3396 .reg = 0xa10,
3397 .module = TI_CLKM_CM,
3398 .flags = CLKF_HSOTGUSB | CLKF_OMAP3 | CLKF_INTERFACE,
3399};
3400
3401static struct ti_clk hsotgusb_ick_3430es2 = {
3402 .name = "hsotgusb_ick_3430es2",
3403 .clkdm_name = "core_l3_clkdm",
3404 .type = TI_CLK_GATE,
3405 .data = &hsotgusb_ick_3430es2_data,
3406};
3407
3408static struct ti_clk_gate gfx_l3_ck_data = {
3409 .parent = "l3_ick",
3410 .bit_shift = 0,
3411 .reg = 0xb10,
3412 .module = TI_CLKM_CM,
3413 .flags = CLKF_WAIT,
3414};
3415
3416static struct ti_clk gfx_l3_ck = {
3417 .name = "gfx_l3_ck",
3418 .clkdm_name = "gfx_3430es1_clkdm",
3419 .type = TI_CLK_GATE,
3420 .data = &gfx_l3_ck_data,
3421};
3422
3423static struct ti_clk_fixed_factor gfx_l3_ick_data = {
3424 .parent = "gfx_l3_ck",
3425 .div = 1,
3426 .mult = 1,
3427};
3428
3429static struct ti_clk gfx_l3_ick = {
3430 .name = "gfx_l3_ick",
3431 .type = TI_CLK_FIXED_FACTOR,
3432 .data = &gfx_l3_ick_data,
3433};
3434
3435static struct ti_clk_gate mcbsp1_ick_data = {
3436 .parent = "core_l4_ick",
3437 .bit_shift = 9,
3438 .reg = 0xa10,
3439 .module = TI_CLKM_CM,
3440 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3441};
3442
3443static struct ti_clk mcbsp1_ick = {
3444 .name = "mcbsp1_ick",
3445 .clkdm_name = "core_l4_clkdm",
3446 .type = TI_CLK_GATE,
3447 .data = &mcbsp1_ick_data,
3448};
3449
3450static struct ti_clk_fixed_factor gpt12_fck_data = {
3451 .parent = "secure_32k_fck",
3452 .div = 1,
3453 .mult = 1,
3454};
3455
3456static struct ti_clk gpt12_fck = {
3457 .name = "gpt12_fck",
3458 .type = TI_CLK_FIXED_FACTOR,
3459 .data = &gpt12_fck_data,
3460};
3461
3462static struct ti_clk_gate gfx_cg2_ck_data = {
3463 .parent = "gfx_l3_fck",
3464 .bit_shift = 2,
3465 .reg = 0xb00,
3466 .module = TI_CLKM_CM,
3467 .flags = CLKF_WAIT,
3468};
3469
3470static struct ti_clk gfx_cg2_ck = {
3471 .name = "gfx_cg2_ck",
3472 .clkdm_name = "gfx_3430es1_clkdm",
3473 .type = TI_CLK_GATE,
3474 .data = &gfx_cg2_ck_data,
3475};
3476
3477static struct ti_clk_gate i2c2_ick_data = {
3478 .parent = "core_l4_ick",
3479 .bit_shift = 16,
3480 .reg = 0xa10,
3481 .module = TI_CLKM_CM,
3482 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3483};
3484
3485static struct ti_clk i2c2_ick = {
3486 .name = "i2c2_ick",
3487 .clkdm_name = "core_l4_clkdm",
3488 .type = TI_CLK_GATE,
3489 .data = &i2c2_ick_data,
3490};
3491
3492static struct ti_clk_gate gpio4_dbck_data = {
3493 .parent = "per_32k_alwon_fck",
3494 .bit_shift = 15,
3495 .reg = 0x1000,
3496 .module = TI_CLKM_CM,
3497};
3498
3499static struct ti_clk gpio4_dbck = {
3500 .name = "gpio4_dbck",
3501 .clkdm_name = "per_clkdm",
3502 .type = TI_CLK_GATE,
3503 .data = &gpio4_dbck_data,
3504};
3505
3506static struct ti_clk_gate i2c3_fck_data = {
3507 .parent = "core_96m_fck",
3508 .bit_shift = 17,
3509 .reg = 0xa00,
3510 .module = TI_CLKM_CM,
3511 .flags = CLKF_WAIT,
3512};
3513
3514static struct ti_clk i2c3_fck = {
3515 .name = "i2c3_fck",
3516 .clkdm_name = "core_l4_clkdm",
3517 .type = TI_CLK_GATE,
3518 .data = &i2c3_fck_data,
3519};
3520
3521static struct ti_clk_composite gpt3_fck_data = {
3522 .mux = &gpt3_mux_fck_data,
3523 .gate = &gpt3_gate_fck_data,
3524};
3525
3526static struct ti_clk gpt3_fck = {
3527 .name = "gpt3_fck",
3528 .type = TI_CLK_COMPOSITE,
3529 .data = &gpt3_fck_data,
3530};
3531
3532static struct ti_clk_gate i2c1_ick_data = {
3533 .parent = "core_l4_ick",
3534 .bit_shift = 15,
3535 .reg = 0xa10,
3536 .module = TI_CLKM_CM,
3537 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3538};
3539
3540static struct ti_clk i2c1_ick = {
3541 .name = "i2c1_ick",
3542 .clkdm_name = "core_l4_clkdm",
3543 .type = TI_CLK_GATE,
3544 .data = &i2c1_ick_data,
3545};
3546
3547static struct ti_clk_gate omap_32ksync_ick_data = {
3548 .parent = "wkup_l4_ick",
3549 .bit_shift = 2,
3550 .reg = 0xc10,
3551 .module = TI_CLKM_CM,
3552 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3553};
3554
3555static struct ti_clk omap_32ksync_ick = {
3556 .name = "omap_32ksync_ick",
3557 .clkdm_name = "wkup_clkdm",
3558 .type = TI_CLK_GATE,
3559 .data = &omap_32ksync_ick_data,
3560};
3561
3562static struct ti_clk_gate aes2_ick_data = {
3563 .parent = "core_l4_ick",
3564 .bit_shift = 28,
3565 .reg = 0xa10,
3566 .module = TI_CLKM_CM,
3567 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3568};
3569
3570static struct ti_clk aes2_ick = {
3571 .name = "aes2_ick",
3572 .clkdm_name = "core_l4_clkdm",
3573 .type = TI_CLK_GATE,
3574 .data = &aes2_ick_data,
3575};
3576
3577static const char *gpt8_mux_fck_parents[] = {
3578 "omap_32k_fck",
3579 "sys_ck",
3580};
3581
3582static struct ti_clk_mux gpt8_mux_fck_data = {
3583 .bit_shift = 6,
3584 .num_parents = ARRAY_SIZE(gpt8_mux_fck_parents),
3585 .reg = 0x1040,
3586 .module = TI_CLKM_CM,
3587 .parents = gpt8_mux_fck_parents,
3588};
3589
3590static struct ti_clk_composite gpt8_fck_data = {
3591 .mux = &gpt8_mux_fck_data,
3592 .gate = &gpt8_gate_fck_data,
3593};
3594
3595static struct ti_clk gpt8_fck = {
3596 .name = "gpt8_fck",
3597 .type = TI_CLK_COMPOSITE,
3598 .data = &gpt8_fck_data,
3599};
3600
3601static struct ti_clk_gate mcbsp4_gate_fck_data = {
3602 .parent = "mcbsp_clks",
3603 .bit_shift = 2,
3604 .reg = 0x1000,
3605 .module = TI_CLKM_CM,
3606};
3607
3608static struct ti_clk_composite mcbsp4_fck_data = {
3609 .mux = &mcbsp4_mux_fck_data,
3610 .gate = &mcbsp4_gate_fck_data,
3611};
3612
3613static struct ti_clk mcbsp4_fck = {
3614 .name = "mcbsp4_fck",
3615 .type = TI_CLK_COMPOSITE,
3616 .data = &mcbsp4_fck_data,
3617};
3618
3619static struct ti_clk_gate gpio2_dbck_data = {
3620 .parent = "per_32k_alwon_fck",
3621 .bit_shift = 13,
3622 .reg = 0x1000,
3623 .module = TI_CLKM_CM,
3624};
3625
3626static struct ti_clk gpio2_dbck = {
3627 .name = "gpio2_dbck",
3628 .clkdm_name = "per_clkdm",
3629 .type = TI_CLK_GATE,
3630 .data = &gpio2_dbck_data,
3631};
3632
3633static struct ti_clk_gate usbtll_ick_data = {
3634 .parent = "core_l4_ick",
3635 .bit_shift = 2,
3636 .reg = 0xa18,
3637 .module = TI_CLKM_CM,
3638 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3639};
3640
3641static struct ti_clk usbtll_ick = {
3642 .name = "usbtll_ick",
3643 .clkdm_name = "core_l4_clkdm",
3644 .type = TI_CLK_GATE,
3645 .data = &usbtll_ick_data,
3646};
3647
3648static struct ti_clk_gate mcspi4_ick_data = {
3649 .parent = "core_l4_ick",
3650 .bit_shift = 21,
3651 .reg = 0xa10,
3652 .module = TI_CLKM_CM,
3653 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3654};
3655
3656static struct ti_clk mcspi4_ick = {
3657 .name = "mcspi4_ick",
3658 .clkdm_name = "core_l4_clkdm",
3659 .type = TI_CLK_GATE,
3660 .data = &mcspi4_ick_data,
3661};
3662
3663static struct ti_clk_gate dss_96m_fck_data = {
3664 .parent = "omap_96m_fck",
3665 .bit_shift = 2,
3666 .reg = 0xe00,
3667 .module = TI_CLKM_CM,
3668};
3669
3670static struct ti_clk dss_96m_fck = {
3671 .name = "dss_96m_fck",
3672 .clkdm_name = "dss_clkdm",
3673 .type = TI_CLK_GATE,
3674 .data = &dss_96m_fck_data,
3675};
3676
3677static struct ti_clk_divider rm_ick_data = {
3678 .parent = "l4_ick",
3679 .bit_shift = 1,
3680 .max_div = 3,
3681 .reg = 0xc40,
3682 .module = TI_CLKM_CM,
3683 .flags = CLKF_INDEX_STARTS_AT_ONE,
3684};
3685
3686static struct ti_clk rm_ick = {
3687 .name = "rm_ick",
3688 .type = TI_CLK_DIVIDER,
3689 .data = &rm_ick_data,
3690};
3691
3692static struct ti_clk_gate hdq_ick_data = {
3693 .parent = "core_l4_ick",
3694 .bit_shift = 22,
3695 .reg = 0xa10,
3696 .module = TI_CLKM_CM,
3697 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3698};
3699
3700static struct ti_clk hdq_ick = {
3701 .name = "hdq_ick",
3702 .clkdm_name = "core_l4_clkdm",
3703 .type = TI_CLK_GATE,
3704 .data = &hdq_ick_data,
3705};
3706
3707static struct ti_clk_fixed_factor dpll3_x2_ck_data = {
3708 .parent = "dpll3_ck",
3709 .div = 1,
3710 .mult = 2,
3711};
3712
3713static struct ti_clk dpll3_x2_ck = {
3714 .name = "dpll3_x2_ck",
3715 .type = TI_CLK_FIXED_FACTOR,
3716 .data = &dpll3_x2_ck_data,
3717};
3718
3719static struct ti_clk_gate mad2d_ick_data = {
3720 .parent = "l3_ick",
3721 .bit_shift = 3,
3722 .reg = 0xa18,
3723 .module = TI_CLKM_CM,
3724 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3725};
3726
3727static struct ti_clk mad2d_ick = {
3728 .name = "mad2d_ick",
3729 .clkdm_name = "d2d_clkdm",
3730 .type = TI_CLK_GATE,
3731 .data = &mad2d_ick_data,
3732};
3733
3734static struct ti_clk_gate fshostusb_fck_data = {
3735 .parent = "core_48m_fck",
3736 .bit_shift = 5,
3737 .reg = 0xa00,
3738 .module = TI_CLKM_CM,
3739 .flags = CLKF_WAIT,
3740};
3741
3742static struct ti_clk fshostusb_fck = {
3743 .name = "fshostusb_fck",
3744 .clkdm_name = "core_l4_clkdm",
3745 .type = TI_CLK_GATE,
3746 .data = &fshostusb_fck_data,
3747};
3748
3749static struct ti_clk_gate sr1_fck_data = {
3750 .parent = "sys_ck",
3751 .bit_shift = 6,
3752 .reg = 0xc00,
3753 .module = TI_CLKM_CM,
3754 .flags = CLKF_WAIT,
3755};
3756
3757static struct ti_clk sr1_fck = {
3758 .name = "sr1_fck",
3759 .clkdm_name = "wkup_clkdm",
3760 .type = TI_CLK_GATE,
3761 .data = &sr1_fck_data,
3762};
3763
3764static struct ti_clk_gate des2_ick_data = {
3765 .parent = "core_l4_ick",
3766 .bit_shift = 26,
3767 .reg = 0xa10,
3768 .module = TI_CLKM_CM,
3769 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3770};
3771
3772static struct ti_clk des2_ick = {
3773 .name = "des2_ick",
3774 .clkdm_name = "core_l4_clkdm",
3775 .type = TI_CLK_GATE,
3776 .data = &des2_ick_data,
3777};
3778
3779static struct ti_clk_gate sdrc_ick_data = {
3780 .parent = "core_l3_ick",
3781 .bit_shift = 1,
3782 .reg = 0xa10,
3783 .module = TI_CLKM_CM,
3784 .flags = CLKF_WAIT,
3785};
3786
3787static struct ti_clk sdrc_ick = {
3788 .name = "sdrc_ick",
3789 .clkdm_name = "core_l3_clkdm",
3790 .type = TI_CLK_GATE,
3791 .data = &sdrc_ick_data,
3792};
3793
3794static struct ti_clk_composite gpt4_fck_data = {
3795 .mux = &gpt4_mux_fck_data,
3796 .gate = &gpt4_gate_fck_data,
3797};
3798
3799static struct ti_clk gpt4_fck = {
3800 .name = "gpt4_fck",
3801 .type = TI_CLK_COMPOSITE,
3802 .data = &gpt4_fck_data,
3803};
3804
3805static struct ti_clk_gate dpll4_m3x2_ck_omap36xx_data = {
3806 .parent = "dpll4_m3x2_mul_ck",
3807 .bit_shift = 0x1c,
3808 .reg = 0xd00,
3809 .module = TI_CLKM_CM,
3810 .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
3811};
3812
3813static struct ti_clk dpll4_m3x2_ck_omap36xx = {
3814 .name = "dpll4_m3x2_ck",
3815 .type = TI_CLK_GATE,
3816 .data = &dpll4_m3x2_ck_omap36xx_data,
3817 .patch = &dpll4_m3x2_ck,
3818};
3819
3820static struct ti_clk_gate cpefuse_fck_data = {
3821 .parent = "sys_ck",
3822 .bit_shift = 0,
3823 .reg = 0xa08,
3824 .module = TI_CLKM_CM,
3825};
3826
3827static struct ti_clk cpefuse_fck = {
3828 .name = "cpefuse_fck",
3829 .clkdm_name = "core_l4_clkdm",
3830 .type = TI_CLK_GATE,
3831 .data = &cpefuse_fck_data,
3832};
3833
3834static struct ti_clk_gate mcspi3_ick_data = {
3835 .parent = "core_l4_ick",
3836 .bit_shift = 20,
3837 .reg = 0xa10,
3838 .module = TI_CLKM_CM,
3839 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3840};
3841
3842static struct ti_clk mcspi3_ick = {
3843 .name = "mcspi3_ick",
3844 .clkdm_name = "core_l4_clkdm",
3845 .type = TI_CLK_GATE,
3846 .data = &mcspi3_ick_data,
3847};
3848
3849static struct ti_clk_fixed_factor ssi_sst_fck_3430es2_data = {
3850 .parent = "ssi_ssr_fck",
3851 .div = 2,
3852 .mult = 1,
3853};
3854
3855static struct ti_clk ssi_sst_fck_3430es2 = {
3856 .name = "ssi_sst_fck",
3857 .type = TI_CLK_FIXED_FACTOR,
3858 .data = &ssi_sst_fck_3430es2_data,
3859};
3860
3861static struct ti_clk_gate gpio1_dbck_data = {
3862 .parent = "wkup_32k_fck",
3863 .bit_shift = 3,
3864 .reg = 0xc00,
3865 .module = TI_CLKM_CM,
3866};
3867
3868static struct ti_clk gpio1_dbck = {
3869 .name = "gpio1_dbck",
3870 .clkdm_name = "wkup_clkdm",
3871 .type = TI_CLK_GATE,
3872 .data = &gpio1_dbck_data,
3873};
3874
3875static struct ti_clk_gate gpt4_ick_data = {
3876 .parent = "per_l4_ick",
3877 .bit_shift = 5,
3878 .reg = 0x1010,
3879 .module = TI_CLKM_CM,
3880 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3881};
3882
3883static struct ti_clk gpt4_ick = {
3884 .name = "gpt4_ick",
3885 .clkdm_name = "per_clkdm",
3886 .type = TI_CLK_GATE,
3887 .data = &gpt4_ick_data,
3888};
3889
3890static struct ti_clk_gate gpt2_ick_data = {
3891 .parent = "per_l4_ick",
3892 .bit_shift = 3,
3893 .reg = 0x1010,
3894 .module = TI_CLKM_CM,
3895 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3896};
3897
3898static struct ti_clk gpt2_ick = {
3899 .name = "gpt2_ick",
3900 .clkdm_name = "per_clkdm",
3901 .type = TI_CLK_GATE,
3902 .data = &gpt2_ick_data,
3903};
3904
3905static struct ti_clk_gate mmchs1_fck_data = {
3906 .parent = "core_96m_fck",
3907 .bit_shift = 24,
3908 .reg = 0xa00,
3909 .module = TI_CLKM_CM,
3910 .flags = CLKF_WAIT,
3911};
3912
3913static struct ti_clk mmchs1_fck = {
3914 .name = "mmchs1_fck",
3915 .clkdm_name = "core_l4_clkdm",
3916 .type = TI_CLK_GATE,
3917 .data = &mmchs1_fck_data,
3918};
3919
3920static struct ti_clk_fixed dummy_apb_pclk_data = {
3921 .frequency = 0x0,
3922};
3923
3924static struct ti_clk dummy_apb_pclk = {
3925 .name = "dummy_apb_pclk",
3926 .type = TI_CLK_FIXED,
3927 .data = &dummy_apb_pclk_data,
3928};
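/*
 * dummy_apb_pclk is a rate-0 placeholder handed out as "apb_pclk" so
 * that AMBA/PrimeCell bus code which insists on claiming an APB clock
 * keeps working even though OMAP3 has no such gate to manage.
 */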
3929
3930static struct ti_clk_gate gpio6_dbck_data = {
3931 .parent = "per_32k_alwon_fck",
3932 .bit_shift = 17,
3933 .reg = 0x1000,
3934 .module = TI_CLKM_CM,
3935};
3936
3937static struct ti_clk gpio6_dbck = {
3938 .name = "gpio6_dbck",
3939 .clkdm_name = "per_clkdm",
3940 .type = TI_CLK_GATE,
3941 .data = &gpio6_dbck_data,
3942};
3943
3944static struct ti_clk_gate uart2_ick_data = {
3945 .parent = "core_l4_ick",
3946 .bit_shift = 14,
3947 .reg = 0xa10,
3948 .module = TI_CLKM_CM,
3949 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3950};
3951
3952static struct ti_clk uart2_ick = {
3953 .name = "uart2_ick",
3954 .clkdm_name = "core_l4_clkdm",
3955 .type = TI_CLK_GATE,
3956 .data = &uart2_ick_data,
3957};
3958
3959static struct ti_clk_fixed_factor dpll4_x2_ck_data = {
3960 .parent = "dpll4_ck",
3961 .div = 1,
3962 .mult = 2,
3963};
3964
3965static struct ti_clk dpll4_x2_ck = {
3966 .name = "dpll4_x2_ck",
3967 .type = TI_CLK_FIXED_FACTOR,
3968 .data = &dpll4_x2_ck_data,
3969};
3970
3971static struct ti_clk_gate gpt7_ick_data = {
3972 .parent = "per_l4_ick",
3973 .bit_shift = 8,
3974 .reg = 0x1010,
3975 .module = TI_CLKM_CM,
3976 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3977};
3978
3979static struct ti_clk gpt7_ick = {
3980 .name = "gpt7_ick",
3981 .clkdm_name = "per_clkdm",
3982 .type = TI_CLK_GATE,
3983 .data = &gpt7_ick_data,
3984};
3985
3986static struct ti_clk_gate dss_tv_fck_data = {
3987 .parent = "omap_54m_fck",
3988 .bit_shift = 2,
3989 .reg = 0xe00,
3990 .module = TI_CLKM_CM,
3991};
3992
3993static struct ti_clk dss_tv_fck = {
3994 .name = "dss_tv_fck",
3995 .clkdm_name = "dss_clkdm",
3996 .type = TI_CLK_GATE,
3997 .data = &dss_tv_fck_data,
3998};
3999
4000static struct ti_clk_gate mcbsp5_ick_data = {
4001 .parent = "core_l4_ick",
4002 .bit_shift = 10,
4003 .reg = 0xa10,
4004 .module = TI_CLKM_CM,
4005 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
4006};
4007
4008static struct ti_clk mcbsp5_ick = {
4009 .name = "mcbsp5_ick",
4010 .clkdm_name = "core_l4_clkdm",
4011 .type = TI_CLK_GATE,
4012 .data = &mcbsp5_ick_data,
4013};
4014
4015static struct ti_clk_gate mcspi1_ick_data = {
4016 .parent = "core_l4_ick",
4017 .bit_shift = 18,
4018 .reg = 0xa10,
4019 .module = TI_CLKM_CM,
4020 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
4021};
4022
4023static struct ti_clk mcspi1_ick = {
4024 .name = "mcspi1_ick",
4025 .clkdm_name = "core_l4_clkdm",
4026 .type = TI_CLK_GATE,
4027 .data = &mcspi1_ick_data,
4028};
4029
4030static struct ti_clk_gate d2d_26m_fck_data = {
4031 .parent = "sys_ck",
4032 .bit_shift = 3,
4033 .reg = 0xa00,
4034 .module = TI_CLKM_CM,
4035 .flags = CLKF_WAIT,
4036};
4037
4038static struct ti_clk d2d_26m_fck = {
4039 .name = "d2d_26m_fck",
4040 .clkdm_name = "d2d_clkdm",
4041 .type = TI_CLK_GATE,
4042 .data = &d2d_26m_fck_data,
4043};
4044
4045static struct ti_clk_gate wdt3_ick_data = {
4046 .parent = "per_l4_ick",
4047 .bit_shift = 12,
4048 .reg = 0x1010,
4049 .module = TI_CLKM_CM,
4050 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
4051};
4052
4053static struct ti_clk wdt3_ick = {
4054 .name = "wdt3_ick",
4055 .clkdm_name = "per_clkdm",
4056 .type = TI_CLK_GATE,
4057 .data = &wdt3_ick_data,
4058};
4059
4060static struct ti_clk_divider pclkx2_fck_data = {
4061 .parent = "emu_src_ck",
4062 .bit_shift = 6,
4063 .max_div = 3,
4064 .reg = 0x1140,
4065 .module = TI_CLKM_CM,
4066 .flags = CLKF_INDEX_STARTS_AT_ONE,
4067};
4068
4069static struct ti_clk pclkx2_fck = {
4070 .name = "pclkx2_fck",
4071 .type = TI_CLK_DIVIDER,
4072 .data = &pclkx2_fck_data,
4073};
4074
4075static struct ti_clk_gate sha12_ick_data = {
4076 .parent = "core_l4_ick",
4077 .bit_shift = 27,
4078 .reg = 0xa10,
4079 .module = TI_CLKM_CM,
4080 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
4081};
4082
4083static struct ti_clk sha12_ick = {
4084 .name = "sha12_ick",
4085 .clkdm_name = "core_l4_clkdm",
4086 .type = TI_CLK_GATE,
4087 .data = &sha12_ick_data,
4088};
4089
4090static struct ti_clk_gate emac_fck_data = {
4091 .parent = "rmii_ck",
4092 .bit_shift = 9,
4093 .reg = 0x59c,
4094 .module = TI_CLKM_SCRM,
4095};
4096
4097static struct ti_clk emac_fck = {
4098 .name = "emac_fck",
4099 .type = TI_CLK_GATE,
4100 .data = &emac_fck_data,
4101};
4102
4103static struct ti_clk_composite gpt10_fck_data = {
4104 .mux = &gpt10_mux_fck_data,
4105 .gate = &gpt10_gate_fck_data,
4106};
4107
4108static struct ti_clk gpt10_fck = {
4109 .name = "gpt10_fck",
4110 .type = TI_CLK_COMPOSITE,
4111 .data = &gpt10_fck_data,
4112};
4113
4114static struct ti_clk_gate wdt2_fck_data = {
4115 .parent = "wkup_32k_fck",
4116 .bit_shift = 5,
4117 .reg = 0xc00,
4118 .module = TI_CLKM_CM,
4119 .flags = CLKF_WAIT,
4120};
4121
4122static struct ti_clk wdt2_fck = {
4123 .name = "wdt2_fck",
4124 .clkdm_name = "wkup_clkdm",
4125 .type = TI_CLK_GATE,
4126 .data = &wdt2_fck_data,
4127};
4128
4129static struct ti_clk_gate cam_ick_data = {
4130 .parent = "l4_ick",
4131 .bit_shift = 0,
4132 .reg = 0xf10,
4133 .module = TI_CLKM_CM,
4134 .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
4135};
4136
4137static struct ti_clk cam_ick = {
4138 .name = "cam_ick",
4139 .clkdm_name = "cam_clkdm",
4140 .type = TI_CLK_GATE,
4141 .data = &cam_ick_data,
4142};
4143
4144static struct ti_clk_gate ssi_ick_3430es2_data = {
4145 .parent = "ssi_l4_ick",
4146 .bit_shift = 0,
4147 .reg = 0xa10,
4148 .module = TI_CLKM_CM,
4149 .flags = CLKF_SSI | CLKF_OMAP3 | CLKF_INTERFACE,
4150};
4151
4152static struct ti_clk ssi_ick_3430es2 = {
4153 .name = "ssi_ick",
4154 .clkdm_name = "core_l4_clkdm",
4155 .type = TI_CLK_GATE,
4156 .data = &ssi_ick_3430es2_data,
4157};
4158
4159static struct ti_clk_gate gpio4_ick_data = {
4160 .parent = "per_l4_ick",
4161 .bit_shift = 15,
4162 .reg = 0x1010,
4163 .module = TI_CLKM_CM,
4164 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
4165};
4166
4167static struct ti_clk gpio4_ick = {
4168 .name = "gpio4_ick",
4169 .clkdm_name = "per_clkdm",
4170 .type = TI_CLK_GATE,
4171 .data = &gpio4_ick_data,
4172};
4173
4174static struct ti_clk_gate wdt1_ick_data = {
4175 .parent = "wkup_l4_ick",
4176 .bit_shift = 4,
4177 .reg = 0xc10,
4178 .module = TI_CLKM_CM,
4179 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
4180};
4181
4182static struct ti_clk wdt1_ick = {
4183 .name = "wdt1_ick",
4184 .clkdm_name = "wkup_clkdm",
4185 .type = TI_CLK_GATE,
4186 .data = &wdt1_ick_data,
4187};
4188
4189static struct ti_clk_gate rng_ick_data = {
4190 .parent = "security_l4_ick2",
4191 .bit_shift = 2,
4192 .reg = 0xa14,
4193 .module = TI_CLKM_CM,
4194 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
4195};
4196
4197static struct ti_clk rng_ick = {
4198 .name = "rng_ick",
4199 .type = TI_CLK_GATE,
4200 .data = &rng_ick_data,
4201};
4202
4203static struct ti_clk_gate icr_ick_data = {
4204 .parent = "core_l4_ick",
4205 .bit_shift = 29,
4206 .reg = 0xa10,
4207 .module = TI_CLKM_CM,
4208 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
4209};
4210
4211static struct ti_clk icr_ick = {
4212 .name = "icr_ick",
4213 .clkdm_name = "core_l4_clkdm",
4214 .type = TI_CLK_GATE,
4215 .data = &icr_ick_data,
4216};
4217
4218static struct ti_clk_gate sgx_ick_data = {
4219 .parent = "l3_ick",
4220 .bit_shift = 0,
4221 .reg = 0xb10,
4222 .module = TI_CLKM_CM,
4223 .flags = CLKF_WAIT,
4224};
4225
4226static struct ti_clk sgx_ick = {
4227 .name = "sgx_ick",
4228 .clkdm_name = "sgx_clkdm",
4229 .type = TI_CLK_GATE,
4230 .data = &sgx_ick_data,
4231};
4232
4233static struct ti_clk_divider sys_clkout2_data = {
4234 .parent = "clkout2_src_ck",
4235 .bit_shift = 3,
4236 .max_div = 64,
4237 .reg = 0xd70,
4238 .module = TI_CLKM_CM,
4239 .flags = CLKF_INDEX_POWER_OF_TWO,
4240};
4241
4242static struct ti_clk sys_clkout2 = {
4243 .name = "sys_clkout2",
4244 .type = TI_CLK_DIVIDER,
4245 .data = &sys_clkout2_data,
4246};
4247
4248static struct ti_clk_alias omap34xx_omap36xx_clks[] = {
4249 CLK(NULL, "security_l4_ick2", &security_l4_ick2),
4250 CLK(NULL, "aes1_ick", &aes1_ick),
4251 CLK("omap_rng", "ick", &rng_ick),
4252 CLK("omap3-rom-rng", "ick", &rng_ick),
4253 CLK(NULL, "sha11_ick", &sha11_ick),
4254 CLK(NULL, "des1_ick", &des1_ick),
4255 CLK(NULL, "cam_mclk", &cam_mclk),
4256 CLK(NULL, "cam_ick", &cam_ick),
4257 CLK(NULL, "csi2_96m_fck", &csi2_96m_fck),
4258 CLK(NULL, "security_l3_ick", &security_l3_ick),
4259 CLK(NULL, "pka_ick", &pka_ick),
4260 CLK(NULL, "icr_ick", &icr_ick),
4261 CLK(NULL, "des2_ick", &des2_ick),
4262 CLK(NULL, "mspro_ick", &mspro_ick),
4263 CLK(NULL, "mailboxes_ick", &mailboxes_ick),
4264 CLK(NULL, "ssi_l4_ick", &ssi_l4_ick),
4265 CLK(NULL, "sr1_fck", &sr1_fck),
4266 CLK(NULL, "sr2_fck", &sr2_fck),
4267 CLK(NULL, "sr_l4_ick", &sr_l4_ick),
4268 CLK(NULL, "dpll2_fck", &dpll2_fck),
4269 CLK(NULL, "dpll2_ck", &dpll2_ck),
4270 CLK(NULL, "dpll2_m2_ck", &dpll2_m2_ck),
4271 CLK(NULL, "iva2_ck", &iva2_ck),
4272 CLK(NULL, "modem_fck", &modem_fck),
4273 CLK(NULL, "sad2d_ick", &sad2d_ick),
4274 CLK(NULL, "mad2d_ick", &mad2d_ick),
4275 CLK(NULL, "mspro_fck", &mspro_fck),
4276 { NULL },
4277};
4278
4279static struct ti_clk_alias omap36xx_omap3430es2plus_clks[] = {
4280 CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es2),
4281 CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es2),
4282 CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es2),
4283 CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es2),
4284 CLK(NULL, "ssi_ick", &ssi_ick_3430es2),
4285 CLK(NULL, "sys_d2_ck", &sys_d2_ck),
4286 CLK(NULL, "omap_96m_d2_fck", &omap_96m_d2_fck),
4287 CLK(NULL, "omap_96m_d4_fck", &omap_96m_d4_fck),
4288 CLK(NULL, "omap_96m_d8_fck", &omap_96m_d8_fck),
4289 CLK(NULL, "omap_96m_d10_fck", &omap_96m_d10_fck),
4290 CLK(NULL, "dpll5_m2_d4_ck", &dpll5_m2_d4_ck),
4291 CLK(NULL, "dpll5_m2_d8_ck", &dpll5_m2_d8_ck),
4292 CLK(NULL, "dpll5_m2_d16_ck", &dpll5_m2_d16_ck),
4293 CLK(NULL, "dpll5_m2_d20_ck", &dpll5_m2_d20_ck),
4294 CLK(NULL, "usim_fck", &usim_fck),
4295 CLK(NULL, "usim_ick", &usim_ick),
4296 { NULL },
4297};
4298
4299static struct ti_clk_alias omap3xxx_clks[] = {
4300 CLK(NULL, "apb_pclk", &dummy_apb_pclk),
4301 CLK(NULL, "omap_32k_fck", &omap_32k_fck),
4302 CLK(NULL, "virt_12m_ck", &virt_12m_ck),
4303 CLK(NULL, "virt_13m_ck", &virt_13m_ck),
4304 CLK(NULL, "virt_19200000_ck", &virt_19200000_ck),
4305 CLK(NULL, "virt_26000000_ck", &virt_26000000_ck),
4306 CLK(NULL, "virt_38_4m_ck", &virt_38_4m_ck),
4307 CLK(NULL, "virt_16_8m_ck", &virt_16_8m_ck),
4308 CLK(NULL, "osc_sys_ck", &osc_sys_ck),
4309 CLK("twl", "fck", &osc_sys_ck),
4310 CLK(NULL, "sys_ck", &sys_ck),
4311 CLK(NULL, "timer_sys_ck", &sys_ck),
4312 CLK(NULL, "dpll4_ck", &dpll4_ck),
4313 CLK(NULL, "dpll4_m2_ck", &dpll4_m2_ck),
4314 CLK(NULL, "dpll4_m2x2_mul_ck", &dpll4_m2x2_mul_ck),
4315 CLK(NULL, "dpll4_m2x2_ck", &dpll4_m2x2_ck),
4316 CLK(NULL, "omap_96m_alwon_fck", &omap_96m_alwon_fck),
4317 CLK(NULL, "dpll3_ck", &dpll3_ck),
4318 CLK(NULL, "dpll3_m3_ck", &dpll3_m3_ck),
4319 CLK(NULL, "dpll3_m3x2_mul_ck", &dpll3_m3x2_mul_ck),
4320 CLK(NULL, "dpll3_m3x2_ck", &dpll3_m3x2_ck),
4321 CLK("etb", "emu_core_alwon_ck", &emu_core_alwon_ck),
4322 CLK(NULL, "sys_altclk", &sys_altclk),
4323 CLK(NULL, "mcbsp_clks", &mcbsp_clks),
4324 CLK(NULL, "sys_clkout1", &sys_clkout1),
4325 CLK(NULL, "dpll3_m2_ck", &dpll3_m2_ck),
4326 CLK(NULL, "core_ck", &core_ck),
4327 CLK(NULL, "dpll1_fck", &dpll1_fck),
4328 CLK(NULL, "dpll1_ck", &dpll1_ck),
4329 CLK(NULL, "cpufreq_ck", &dpll1_ck),
4330 CLK(NULL, "dpll1_x2_ck", &dpll1_x2_ck),
4331 CLK(NULL, "dpll1_x2m2_ck", &dpll1_x2m2_ck),
4332 CLK(NULL, "dpll3_x2_ck", &dpll3_x2_ck),
4333 CLK(NULL, "dpll3_m2x2_ck", &dpll3_m2x2_ck),
4334 CLK(NULL, "dpll4_x2_ck", &dpll4_x2_ck),
4335 CLK(NULL, "cm_96m_fck", &cm_96m_fck),
4336 CLK(NULL, "omap_96m_fck", &omap_96m_fck),
4337 CLK(NULL, "dpll4_m3_ck", &dpll4_m3_ck),
4338 CLK(NULL, "dpll4_m3x2_mul_ck", &dpll4_m3x2_mul_ck),
4339 CLK(NULL, "dpll4_m3x2_ck", &dpll4_m3x2_ck),
4340 CLK(NULL, "omap_54m_fck", &omap_54m_fck),
4341 CLK(NULL, "cm_96m_d2_fck", &cm_96m_d2_fck),
4342 CLK(NULL, "omap_48m_fck", &omap_48m_fck),
4343 CLK(NULL, "omap_12m_fck", &omap_12m_fck),
4344 CLK(NULL, "dpll4_m4_ck", &dpll4_m4_ck),
4345 CLK(NULL, "dpll4_m4x2_mul_ck", &dpll4_m4x2_mul_ck),
4346 CLK(NULL, "dpll4_m4x2_ck", &dpll4_m4x2_ck),
4347 CLK(NULL, "dpll4_m5_ck", &dpll4_m5_ck),
4348 CLK(NULL, "dpll4_m5x2_mul_ck", &dpll4_m5x2_mul_ck),
4349 CLK(NULL, "dpll4_m5x2_ck", &dpll4_m5x2_ck),
4350 CLK(NULL, "dpll4_m6_ck", &dpll4_m6_ck),
4351 CLK(NULL, "dpll4_m6x2_mul_ck", &dpll4_m6x2_mul_ck),
4352 CLK(NULL, "dpll4_m6x2_ck", &dpll4_m6x2_ck),
4353 CLK("etb", "emu_per_alwon_ck", &emu_per_alwon_ck),
4354 CLK(NULL, "clkout2_src_ck", &clkout2_src_ck),
4355 CLK(NULL, "sys_clkout2", &sys_clkout2),
4356 CLK(NULL, "corex2_fck", &corex2_fck),
4357 CLK(NULL, "mpu_ck", &mpu_ck),
4358 CLK(NULL, "arm_fck", &arm_fck),
4359 CLK("etb", "emu_mpu_alwon_ck", &emu_mpu_alwon_ck),
4360 CLK(NULL, "l3_ick", &l3_ick),
4361 CLK(NULL, "l4_ick", &l4_ick),
4362 CLK(NULL, "rm_ick", &rm_ick),
4363 CLK(NULL, "timer_32k_ck", &omap_32k_fck),
4364 CLK(NULL, "gpt10_fck", &gpt10_fck),
4365 CLK(NULL, "gpt11_fck", &gpt11_fck),
4366 CLK(NULL, "core_96m_fck", &core_96m_fck),
4367 CLK(NULL, "mmchs2_fck", &mmchs2_fck),
4368 CLK(NULL, "mmchs1_fck", &mmchs1_fck),
4369 CLK(NULL, "i2c3_fck", &i2c3_fck),
4370 CLK(NULL, "i2c2_fck", &i2c2_fck),
4371 CLK(NULL, "i2c1_fck", &i2c1_fck),
4372 CLK(NULL, "mcbsp5_fck", &mcbsp5_fck),
4373 CLK(NULL, "mcbsp1_fck", &mcbsp1_fck),
4374 CLK(NULL, "core_48m_fck", &core_48m_fck),
4375 CLK(NULL, "mcspi4_fck", &mcspi4_fck),
4376 CLK(NULL, "mcspi3_fck", &mcspi3_fck),
4377 CLK(NULL, "mcspi2_fck", &mcspi2_fck),
4378 CLK(NULL, "mcspi1_fck", &mcspi1_fck),
4379 CLK(NULL, "uart2_fck", &uart2_fck),
4380 CLK(NULL, "uart1_fck", &uart1_fck),
4381 CLK(NULL, "core_12m_fck", &core_12m_fck),
4382 CLK("omap_hdq.0", "fck", &hdq_fck),
4383 CLK(NULL, "hdq_fck", &hdq_fck),
4384 CLK(NULL, "core_l3_ick", &core_l3_ick),
4385 CLK(NULL, "sdrc_ick", &sdrc_ick),
4386 CLK(NULL, "gpmc_fck", &gpmc_fck),
4387 CLK(NULL, "core_l4_ick", &core_l4_ick),
4388 CLK("omap_hsmmc.1", "ick", &mmchs2_ick),
4389 CLK("omap_hsmmc.0", "ick", &mmchs1_ick),
4390 CLK(NULL, "mmchs2_ick", &mmchs2_ick),
4391 CLK(NULL, "mmchs1_ick", &mmchs1_ick),
4392 CLK("omap_hdq.0", "ick", &hdq_ick),
4393 CLK(NULL, "hdq_ick", &hdq_ick),
4394 CLK("omap2_mcspi.4", "ick", &mcspi4_ick),
4395 CLK("omap2_mcspi.3", "ick", &mcspi3_ick),
4396 CLK("omap2_mcspi.2", "ick", &mcspi2_ick),
4397 CLK("omap2_mcspi.1", "ick", &mcspi1_ick),
4398 CLK(NULL, "mcspi4_ick", &mcspi4_ick),
4399 CLK(NULL, "mcspi3_ick", &mcspi3_ick),
4400 CLK(NULL, "mcspi2_ick", &mcspi2_ick),
4401 CLK(NULL, "mcspi1_ick", &mcspi1_ick),
4402 CLK("omap_i2c.3", "ick", &i2c3_ick),
4403 CLK("omap_i2c.2", "ick", &i2c2_ick),
4404 CLK("omap_i2c.1", "ick", &i2c1_ick),
4405 CLK(NULL, "i2c3_ick", &i2c3_ick),
4406 CLK(NULL, "i2c2_ick", &i2c2_ick),
4407 CLK(NULL, "i2c1_ick", &i2c1_ick),
4408 CLK(NULL, "uart2_ick", &uart2_ick),
4409 CLK(NULL, "uart1_ick", &uart1_ick),
4410 CLK(NULL, "gpt11_ick", &gpt11_ick),
4411 CLK(NULL, "gpt10_ick", &gpt10_ick),
4412 CLK("omap-mcbsp.5", "ick", &mcbsp5_ick),
4413 CLK("omap-mcbsp.1", "ick", &mcbsp1_ick),
4414 CLK(NULL, "mcbsp5_ick", &mcbsp5_ick),
4415 CLK(NULL, "mcbsp1_ick", &mcbsp1_ick),
4416 CLK(NULL, "omapctrl_ick", &omapctrl_ick),
4417 CLK(NULL, "dss_tv_fck", &dss_tv_fck),
4418 CLK(NULL, "dss_96m_fck", &dss_96m_fck),
4419 CLK(NULL, "dss2_alwon_fck", &dss2_alwon_fck),
4420 CLK(NULL, "init_60m_fclk", &dummy_ck),
4421 CLK(NULL, "gpt1_fck", &gpt1_fck),
4422 CLK(NULL, "aes2_ick", &aes2_ick),
4423 CLK(NULL, "wkup_32k_fck", &wkup_32k_fck),
4424 CLK(NULL, "gpio1_dbck", &gpio1_dbck),
4425 CLK(NULL, "sha12_ick", &sha12_ick),
4426 CLK(NULL, "wdt2_fck", &wdt2_fck),
4427 CLK(NULL, "wkup_l4_ick", &wkup_l4_ick),
4428 CLK("omap_wdt", "ick", &wdt2_ick),
4429 CLK(NULL, "wdt2_ick", &wdt2_ick),
4430 CLK(NULL, "wdt1_ick", &wdt1_ick),
4431 CLK(NULL, "gpio1_ick", &gpio1_ick),
4432 CLK(NULL, "omap_32ksync_ick", &omap_32ksync_ick),
4433 CLK(NULL, "gpt12_ick", &gpt12_ick),
4434 CLK(NULL, "gpt1_ick", &gpt1_ick),
4435 CLK(NULL, "per_96m_fck", &per_96m_fck),
4436 CLK(NULL, "per_48m_fck", &per_48m_fck),
4437 CLK(NULL, "uart3_fck", &uart3_fck),
4438 CLK(NULL, "gpt2_fck", &gpt2_fck),
4439 CLK(NULL, "gpt3_fck", &gpt3_fck),
4440 CLK(NULL, "gpt4_fck", &gpt4_fck),
4441 CLK(NULL, "gpt5_fck", &gpt5_fck),
4442 CLK(NULL, "gpt6_fck", &gpt6_fck),
4443 CLK(NULL, "gpt7_fck", &gpt7_fck),
4444 CLK(NULL, "gpt8_fck", &gpt8_fck),
4445 CLK(NULL, "gpt9_fck", &gpt9_fck),
4446 CLK(NULL, "per_32k_alwon_fck", &per_32k_alwon_fck),
4447 CLK(NULL, "gpio6_dbck", &gpio6_dbck),
4448 CLK(NULL, "gpio5_dbck", &gpio5_dbck),
4449 CLK(NULL, "gpio4_dbck", &gpio4_dbck),
4450 CLK(NULL, "gpio3_dbck", &gpio3_dbck),
4451 CLK(NULL, "gpio2_dbck", &gpio2_dbck),
4452 CLK(NULL, "wdt3_fck", &wdt3_fck),
4453 CLK(NULL, "per_l4_ick", &per_l4_ick),
4454 CLK(NULL, "gpio6_ick", &gpio6_ick),
4455 CLK(NULL, "gpio5_ick", &gpio5_ick),
4456 CLK(NULL, "gpio4_ick", &gpio4_ick),
4457 CLK(NULL, "gpio3_ick", &gpio3_ick),
4458 CLK(NULL, "gpio2_ick", &gpio2_ick),
4459 CLK(NULL, "wdt3_ick", &wdt3_ick),
4460 CLK(NULL, "uart3_ick", &uart3_ick),
4461 CLK(NULL, "uart4_ick", &uart4_ick),
4462 CLK(NULL, "gpt9_ick", &gpt9_ick),
4463 CLK(NULL, "gpt8_ick", &gpt8_ick),
4464 CLK(NULL, "gpt7_ick", &gpt7_ick),
4465 CLK(NULL, "gpt6_ick", &gpt6_ick),
4466 CLK(NULL, "gpt5_ick", &gpt5_ick),
4467 CLK(NULL, "gpt4_ick", &gpt4_ick),
4468 CLK(NULL, "gpt3_ick", &gpt3_ick),
4469 CLK(NULL, "gpt2_ick", &gpt2_ick),
4470 CLK("omap-mcbsp.2", "ick", &mcbsp2_ick),
4471 CLK("omap-mcbsp.3", "ick", &mcbsp3_ick),
4472 CLK("omap-mcbsp.4", "ick", &mcbsp4_ick),
4473 CLK(NULL, "mcbsp4_ick", &mcbsp4_ick),
4474 CLK(NULL, "mcbsp3_ick", &mcbsp3_ick),
4475 CLK(NULL, "mcbsp2_ick", &mcbsp2_ick),
4476 CLK(NULL, "mcbsp2_fck", &mcbsp2_fck),
4477 CLK(NULL, "mcbsp3_fck", &mcbsp3_fck),
4478 CLK(NULL, "mcbsp4_fck", &mcbsp4_fck),
4479 CLK(NULL, "emu_src_mux_ck", &emu_src_mux_ck),
4480 CLK("etb", "emu_src_ck", &emu_src_ck),
4481 CLK(NULL, "emu_src_mux_ck", &emu_src_mux_ck),
4482 CLK(NULL, "emu_src_ck", &emu_src_ck),
4483 CLK(NULL, "pclk_fck", &pclk_fck),
4484 CLK(NULL, "pclkx2_fck", &pclkx2_fck),
4485 CLK(NULL, "atclk_fck", &atclk_fck),
4486 CLK(NULL, "traceclk_src_fck", &traceclk_src_fck),
4487 CLK(NULL, "traceclk_fck", &traceclk_fck),
4488 CLK(NULL, "secure_32k_fck", &secure_32k_fck),
4489 CLK(NULL, "gpt12_fck", &gpt12_fck),
4490 CLK(NULL, "wdt1_fck", &wdt1_fck),
4491 { NULL },
4492};
4493
4494static struct ti_clk_alias omap36xx_am35xx_omap3430es2plus_clks[] = {
4495 CLK(NULL, "dpll5_ck", &dpll5_ck),
4496 CLK(NULL, "dpll5_m2_ck", &dpll5_m2_ck),
4497 CLK(NULL, "core_d3_ck", &core_d3_ck),
4498 CLK(NULL, "core_d4_ck", &core_d4_ck),
4499 CLK(NULL, "core_d6_ck", &core_d6_ck),
4500 CLK(NULL, "omap_192m_alwon_fck", &omap_192m_alwon_fck),
4501 CLK(NULL, "core_d2_ck", &core_d2_ck),
4502 CLK(NULL, "corex2_d3_fck", &corex2_d3_fck),
4503 CLK(NULL, "corex2_d5_fck", &corex2_d5_fck),
4504 CLK(NULL, "sgx_fck", &sgx_fck),
4505 CLK(NULL, "sgx_ick", &sgx_ick),
4506 CLK(NULL, "cpefuse_fck", &cpefuse_fck),
4507 CLK(NULL, "ts_fck", &ts_fck),
4508 CLK(NULL, "usbtll_fck", &usbtll_fck),
4509 CLK(NULL, "usbtll_ick", &usbtll_ick),
4510 CLK("omap_hsmmc.2", "ick", &mmchs3_ick),
4511 CLK(NULL, "mmchs3_ick", &mmchs3_ick),
4512 CLK(NULL, "mmchs3_fck", &mmchs3_fck),
4513 CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es2),
4514 CLK("omapdss_dss", "ick", &dss_ick_3430es2),
4515 CLK(NULL, "dss_ick", &dss_ick_3430es2),
4516 CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck),
4517 CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck),
4518 CLK(NULL, "usbhost_ick", &usbhost_ick),
4519 { NULL },
4520};
4521
4522static struct ti_clk_alias omap3430es1_clks[] = {
4523 CLK(NULL, "gfx_l3_ck", &gfx_l3_ck),
4524 CLK(NULL, "gfx_l3_fck", &gfx_l3_fck),
4525 CLK(NULL, "gfx_l3_ick", &gfx_l3_ick),
4526 CLK(NULL, "gfx_cg1_ck", &gfx_cg1_ck),
4527 CLK(NULL, "gfx_cg2_ck", &gfx_cg2_ck),
4528 CLK(NULL, "d2d_26m_fck", &d2d_26m_fck),
4529 CLK(NULL, "fshostusb_fck", &fshostusb_fck),
4530 CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es1),
4531 CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es1),
4532 CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es1),
4533 CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es1),
4534 CLK(NULL, "fac_ick", &fac_ick),
4535 CLK(NULL, "ssi_ick", &ssi_ick_3430es1),
4536 CLK(NULL, "usb_l4_ick", &usb_l4_ick),
4537 CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es1),
4538 CLK("omapdss_dss", "ick", &dss_ick_3430es1),
4539 CLK(NULL, "dss_ick", &dss_ick_3430es1),
4540 { NULL },
4541};
4542
4543static struct ti_clk_alias omap36xx_clks[] = {
4544 CLK(NULL, "uart4_fck", &uart4_fck),
4545 { NULL },
4546};
4547
4548static struct ti_clk_alias am35xx_clks[] = {
4549 CLK(NULL, "ipss_ick", &ipss_ick),
4550 CLK(NULL, "rmii_ck", &rmii_ck),
4551 CLK(NULL, "pclk_ck", &pclk_ck),
4552 CLK(NULL, "emac_ick", &emac_ick),
4553 CLK(NULL, "emac_fck", &emac_fck),
4554 CLK("davinci_emac.0", NULL, &emac_ick),
4555 CLK("davinci_mdio.0", NULL, &emac_fck),
4556 CLK("vpfe-capture", "master", &vpfe_ick),
4557 CLK("vpfe-capture", "slave", &vpfe_fck),
4558 CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_am35xx),
4559 CLK(NULL, "hsotgusb_fck", &hsotgusb_fck_am35xx),
4560 CLK(NULL, "hecc_ck", &hecc_ck),
4561 CLK(NULL, "uart4_ick", &uart4_ick_am35xx),
4562 CLK(NULL, "uart4_fck", &uart4_fck_am35xx),
4563 { NULL },
4564};
4565
4566static struct ti_clk *omap36xx_clk_patches[] = {
4567 &dpll4_m3x2_ck_omap36xx,
4568 &dpll3_m3x2_ck_omap36xx,
4569 &dpll4_m6x2_ck_omap36xx,
4570 &dpll4_m2x2_ck_omap36xx,
4571 &dpll4_m5x2_ck_omap36xx,
4572 &dpll4_ck_omap36xx,
4573 NULL,
4574};
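
The patch list works by overwriting descriptors in place: ti_clk_patch_legacy_clks() (added to clk.c later in this diff) memcpy()s each 36xx variant over the generic struct ti_clk that its ->patch member points at, before anything is registered. A hypothetical sketch of what one such entry is assumed to look like; the real dpll4_m3x2_ck_omap36xx definition lives earlier in this file and may differ in type, flags and register values:

/* Hypothetical shape only; register offset, bit and flags are invented. */
static struct ti_clk_gate dpll4_m3x2_ck_omap36xx_data = {
	.parent = "dpll4_m3x2_mul_ck",
	.bit_shift = 12,
	.reg = 0xd00,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
};

static struct ti_clk dpll4_m3x2_ck_omap36xx = {
	.name = "dpll4_m3x2_ck",
	.type = TI_CLK_GATE,
	.data = &dpll4_m3x2_ck_omap36xx_data,
	.patch = &dpll4_m3x2_ck,	/* generic descriptor to overwrite */
};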
4575
4576static const char *enable_init_clks[] = {
4577 "sdrc_ick",
4578 "gpmc_fck",
4579 "omapctrl_ick",
4580};
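
These three interface clocks stay enabled for the life of the system (SDRAM controller, GPMC and the system control module). omap2_clk_enable_init_clocks() is defined in the shared TI clock code rather than in this diff; it is assumed to do little more than look up each name and bump the enable count, roughly:

/* Sketch of the assumed helper, not the exact implementation. */
void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks)
{
	struct clk *init_clk;
	int i;

	for (i = 0; i < num_clocks; i++) {
		init_clk = clk_get(NULL, clk_names[i]);
		if (WARN(IS_ERR(init_clk), "could not find init clock %s\n",
			 clk_names[i]))
			continue;
		clk_prepare_enable(init_clk);
	}
}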
4581
4582static void __init omap3_clk_legacy_common_init(void)
4583{
4584 omap2_clk_disable_autoidle_all();
4585
4586 omap2_clk_enable_init_clocks(enable_init_clks,
4587 ARRAY_SIZE(enable_init_clks));
4588
4589 pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
4590 (clk_get_rate(osc_sys_ck.clk) / 1000000),
4591 (clk_get_rate(osc_sys_ck.clk) / 100000) % 10,
4592 (clk_get_rate(core_ck.clk) / 1000000),
4593 (clk_get_rate(arm_fck.clk) / 1000000));
4594}
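
The pr_info() format above prints the crystal rate with one decimal digit by splitting the integer division; a quick standalone check of that arithmetic for a 19.2 MHz sys_ck:

#include <stdio.h>

int main(void)
{
	unsigned long rate = 19200000;	/* e.g. a 19.2 MHz crystal */

	/* matches the pr_info() format above: prints "19.2 MHz" */
	printf("%lu.%01lu MHz\n", rate / 1000000, (rate / 100000) % 10);
	return 0;
}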
4595
4596int __init omap3430es1_clk_legacy_init(void)
4597{
4598 int r;
4599
4600 r = ti_clk_register_legacy_clks(omap3430es1_clks);
4601 r |= ti_clk_register_legacy_clks(omap34xx_omap36xx_clks);
4602 r |= ti_clk_register_legacy_clks(omap3xxx_clks);
4603
4604 omap3_clk_legacy_common_init();
4605
4606 return r;
4607}
4608
4609int __init omap3430_clk_legacy_init(void)
4610{
4611 int r;
4612
4613 r = ti_clk_register_legacy_clks(omap34xx_omap36xx_clks);
4614 r |= ti_clk_register_legacy_clks(omap36xx_omap3430es2plus_clks);
4615 r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks);
4616 r |= ti_clk_register_legacy_clks(omap3xxx_clks);
4617
4618 omap3_clk_legacy_common_init();
4619 omap3_clk_lock_dpll5();
4620
4621 return r;
4622}
4623
4624int __init omap36xx_clk_legacy_init(void)
4625{
4626 int r;
4627
4628 ti_clk_patch_legacy_clks(omap36xx_clk_patches);
4629 r = ti_clk_register_legacy_clks(omap36xx_clks);
4630 r |= ti_clk_register_legacy_clks(omap36xx_omap3430es2plus_clks);
4631 r |= ti_clk_register_legacy_clks(omap34xx_omap36xx_clks);
4632 r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks);
4633 r |= ti_clk_register_legacy_clks(omap3xxx_clks);
4634
4635 omap3_clk_legacy_common_init();
4636 omap3_clk_lock_dpll5();
4637
4638 return r;
4639}
4640
4641int __init am35xx_clk_legacy_init(void)
4642{
4643 int r;
4644
4645 r = ti_clk_register_legacy_clks(am35xx_clks);
4646 r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks);
4647 r |= ti_clk_register_legacy_clks(omap3xxx_clks);
4648
4649 omap3_clk_legacy_common_init();
4650 omap3_clk_lock_dpll5();
4651
4652 return r;
4653}
diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
index 0d1750a8aea4..383a06e49b09 100644
--- a/drivers/clk/ti/clk-3xxx.c
+++ b/drivers/clk/ti/clk-3xxx.c
@@ -327,7 +327,6 @@ enum {
327 OMAP3_SOC_OMAP3430_ES1,
328 OMAP3_SOC_OMAP3430_ES2_PLUS,
329 OMAP3_SOC_OMAP3630,
-330 OMAP3_SOC_TI81XX,
330};
331
332static int __init omap3xxx_dt_clk_init(int soc_type)
@@ -370,7 +369,7 @@ static int __init omap3xxx_dt_clk_init(int soc_type)
369 (clk_get_rate(clk_get_sys(NULL, "core_ck")) / 1000000),
370 (clk_get_rate(clk_get_sys(NULL, "arm_fck")) / 1000000));
371
-373 if (soc_type != OMAP3_SOC_TI81XX && soc_type != OMAP3_SOC_OMAP3430_ES1)
372 if (soc_type != OMAP3_SOC_OMAP3430_ES1)
373 omap3_clk_lock_dpll5();
374
375 return 0;
@@ -390,8 +389,3 @@ int __init am35xx_dt_clk_init(void)
389{
390 return omap3xxx_dt_clk_init(OMAP3_SOC_AM35XX);
391}
-393
-394int __init ti81xx_dt_clk_init(void)
-395{
-396 return omap3xxx_dt_clk_init(OMAP3_SOC_TI81XX);
-397}
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index 02517a8206bd..4f4c87751db5 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -12,7 +12,7 @@
12
13#include <linux/kernel.h>
14#include <linux/list.h>
-15#include <linux/clk-private.h>
15#include <linux/clk.h>
16#include <linux/clkdev.h>
17#include <linux/clk/ti.h>
18
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index 5e183993e3ec..14160b223548 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -12,7 +12,7 @@
12
13#include <linux/kernel.h>
14#include <linux/list.h>
-15#include <linux/clk-private.h>
15#include <linux/clk.h>
16#include <linux/clkdev.h>
17#include <linux/io.h>
18#include <linux/clk/ti.h>
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index 62ac8f6e480c..ee32f4deebf4 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -12,7 +12,7 @@
12
13#include <linux/kernel.h>
14#include <linux/list.h>
-15#include <linux/clk-private.h>
15#include <linux/clk.h>
16#include <linux/clkdev.h>
17#include <linux/clk/ti.h>
18
diff --git a/drivers/clk/ti/clk-816x.c b/drivers/clk/ti/clk-816x.c
new file mode 100644
index 000000000000..9451e651a1ff
--- /dev/null
+++ b/drivers/clk/ti/clk-816x.c
@@ -0,0 +1,53 @@
1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License as
4 * published by the Free Software Foundation version 2.
5 *
6 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
7 * kind, whether express or implied; without even the implied warranty
8 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 */
11
12#include <linux/kernel.h>
13#include <linux/list.h>
14#include <linux/clk-provider.h>
15#include <linux/clk/ti.h>
16
17static struct ti_dt_clk dm816x_clks[] = {
18 DT_CLK(NULL, "sys_clkin", "sys_clkin_ck"),
19 DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
20 DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
21 DT_CLK(NULL, "mpu_ck", "mpu_ck"),
22 DT_CLK(NULL, "timer1_fck", "timer1_fck"),
23 DT_CLK(NULL, "timer2_fck", "timer2_fck"),
24 DT_CLK(NULL, "timer3_fck", "timer3_fck"),
25 DT_CLK(NULL, "timer4_fck", "timer4_fck"),
26 DT_CLK(NULL, "timer5_fck", "timer5_fck"),
27 DT_CLK(NULL, "timer6_fck", "timer6_fck"),
28 DT_CLK(NULL, "timer7_fck", "timer7_fck"),
29 DT_CLK(NULL, "sysclk4_ck", "sysclk4_ck"),
30 DT_CLK(NULL, "sysclk5_ck", "sysclk5_ck"),
31 DT_CLK(NULL, "sysclk6_ck", "sysclk6_ck"),
32 DT_CLK(NULL, "sysclk10_ck", "sysclk10_ck"),
33 DT_CLK(NULL, "sysclk18_ck", "sysclk18_ck"),
34 DT_CLK(NULL, "sysclk24_ck", "sysclk24_ck"),
35 DT_CLK("4a100000.ethernet", "sysclk24_ck", "sysclk24_ck"),
36 { .node_name = NULL },
37};
38
39static const char *enable_init_clks[] = {
40 "ddr_pll_clk1",
41 "ddr_pll_clk2",
42 "ddr_pll_clk3",
43};
44
45int __init ti81xx_dt_clk_init(void)
46{
47 ti_dt_clocks_register(dm816x_clks);
48 omap2_clk_disable_autoidle_all();
49 omap2_clk_enable_init_clocks(enable_init_clks,
50 ARRAY_SIZE(enable_init_clks));
51
52 return 0;
53}
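
The DT_CLK() entries above alias device-tree clock nodes under clkdev names so that legacy consumers can still use plain clkdev lookups. A hedged sketch of the consumer side for the ethernet alias registered above (the surrounding function is hypothetical):

static int example_get_eth_clock(void)
{
	/* uses the "4a100000.ethernet" alias registered above */
	struct clk *clk = clk_get_sys("4a100000.ethernet", "sysclk24_ck");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_prepare_enable(clk);
	pr_info("eth clock at %lu Hz\n", clk_get_rate(clk));
	return 0;
}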
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index 337abe5909e1..e22b95646e09 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -22,6 +22,8 @@
22#include <linux/of_address.h>
23#include <linux/list.h>
24
25#include "clock.h"
26
27#undef pr_fmt
28#define pr_fmt(fmt) "%s: " fmt, __func__
29
@@ -183,3 +185,128 @@ void ti_dt_clk_init_retry_clks(void)
185 retries--;
186 }
187}
188
189#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
190void __init ti_clk_patch_legacy_clks(struct ti_clk **patch)
191{
192 while (*patch) {
193 memcpy((*patch)->patch, *patch, sizeof(**patch));
194 patch++;
195 }
196}
197
198struct clk __init *ti_clk_register_clk(struct ti_clk *setup)
199{
200 struct clk *clk;
201 struct ti_clk_fixed *fixed;
202 struct ti_clk_fixed_factor *fixed_factor;
203 struct clk_hw *clk_hw;
204
205 if (setup->clk)
206 return setup->clk;
207
208 switch (setup->type) {
209 case TI_CLK_FIXED:
210 fixed = setup->data;
211
212 clk = clk_register_fixed_rate(NULL, setup->name, NULL,
213 CLK_IS_ROOT, fixed->frequency);
214 break;
215 case TI_CLK_MUX:
216 clk = ti_clk_register_mux(setup);
217 break;
218 case TI_CLK_DIVIDER:
219 clk = ti_clk_register_divider(setup);
220 break;
221 case TI_CLK_COMPOSITE:
222 clk = ti_clk_register_composite(setup);
223 break;
224 case TI_CLK_FIXED_FACTOR:
225 fixed_factor = setup->data;
226
227 clk = clk_register_fixed_factor(NULL, setup->name,
228 fixed_factor->parent,
229 0, fixed_factor->mult,
230 fixed_factor->div);
231 break;
232 case TI_CLK_GATE:
233 clk = ti_clk_register_gate(setup);
234 break;
235 case TI_CLK_DPLL:
236 clk = ti_clk_register_dpll(setup);
237 break;
238 default:
239 pr_err("bad type for %s!\n", setup->name);
240 clk = ERR_PTR(-EINVAL);
241 }
242
243 if (!IS_ERR(clk)) {
244 setup->clk = clk;
245 if (setup->clkdm_name) {
246 if (__clk_get_flags(clk) & CLK_IS_BASIC) {
247 pr_warn("can't setup clkdm for basic clk %s\n",
248 setup->name);
249 } else {
250 clk_hw = __clk_get_hw(clk);
251 to_clk_hw_omap(clk_hw)->clkdm_name =
252 setup->clkdm_name;
253 omap2_init_clk_clkdm(clk_hw);
254 }
255 }
256 }
257
258 return clk;
259}
260
261int __init ti_clk_register_legacy_clks(struct ti_clk_alias *clks)
262{
263 struct clk *clk;
264 bool retry;
265 struct ti_clk_alias *retry_clk;
266 struct ti_clk_alias *tmp;
267
268 while (clks->clk) {
269 clk = ti_clk_register_clk(clks->clk);
270 if (IS_ERR(clk)) {
271 if (PTR_ERR(clk) == -EAGAIN) {
272 list_add(&clks->link, &retry_list);
273 } else {
274 pr_err("register for %s failed: %ld\n",
275 clks->clk->name, PTR_ERR(clk));
276 return PTR_ERR(clk);
277 }
278 } else {
279 clks->lk.clk = clk;
280 clkdev_add(&clks->lk);
281 }
282 clks++;
283 }
284
285 retry = true;
286
287 while (!list_empty(&retry_list) && retry) {
288 retry = false;
289 list_for_each_entry_safe(retry_clk, tmp, &retry_list, link) {
290 pr_debug("retry-init: %s\n", retry_clk->clk->name);
291 clk = ti_clk_register_clk(retry_clk->clk);
292 if (IS_ERR(clk)) {
293 if (PTR_ERR(clk) == -EAGAIN) {
294 continue;
295 } else {
296 pr_err("register for %s failed: %ld\n",
297 retry_clk->clk->name,
298 PTR_ERR(clk));
299 return PTR_ERR(clk);
300 }
301 } else {
302 retry = true;
303 retry_clk->lk.clk = clk;
304 clkdev_add(&retry_clk->lk);
305 list_del(&retry_clk->link);
306 }
307 }
308 }
309
310 return 0;
311}
312#endif
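
The -EAGAIN handling above is a fixed-point iteration: clocks whose parents are not registered yet are parked on retry_list, and the list is re-walked for as long as at least one parked clock makes progress, so dependency order never has to be spelled out. The same shape, distilled into a driver-independent sketch (struct item and register_one() are illustrative stand-ins, not symbols from this driver):

struct item {
	struct list_head link;
};

static int register_one(struct item *it);	/* may return -EAGAIN */

static int register_with_retries(struct list_head *pending)
{
	struct item *it, *tmp;
	bool progress = true;

	while (!list_empty(pending) && progress) {
		progress = false;
		list_for_each_entry_safe(it, tmp, pending, link) {
			int err = register_one(it);

			if (err == -EAGAIN)
				continue;	/* dependency still missing */
			if (err)
				return err;
			list_del(&it->link);	/* done: progress was made */
			progress = true;
		}
	}

	return 0;
}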
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
new file mode 100644
index 000000000000..404158d2d7f8
--- /dev/null
+++ b/drivers/clk/ti/clock.h
@@ -0,0 +1,172 @@
1/*
2 * TI Clock driver internal definitions
3 *
4 * Copyright (C) 2014 Texas Instruments, Inc
5 * Tero Kristo (t-kristo@ti.com)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation version 2.
10 *
11 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
12 * kind, whether express or implied; without even the implied warranty
13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16#ifndef __DRIVERS_CLK_TI_CLOCK__
17#define __DRIVERS_CLK_TI_CLOCK__
18
19enum {
20 TI_CLK_FIXED,
21 TI_CLK_MUX,
22 TI_CLK_DIVIDER,
23 TI_CLK_COMPOSITE,
24 TI_CLK_FIXED_FACTOR,
25 TI_CLK_GATE,
26 TI_CLK_DPLL,
27};
28
29/* Global flags */
30#define CLKF_INDEX_POWER_OF_TWO (1 << 0)
31#define CLKF_INDEX_STARTS_AT_ONE (1 << 1)
32#define CLKF_SET_RATE_PARENT (1 << 2)
33#define CLKF_OMAP3 (1 << 3)
34#define CLKF_AM35XX (1 << 4)
35
36/* Gate flags */
37#define CLKF_SET_BIT_TO_DISABLE (1 << 5)
38#define CLKF_INTERFACE (1 << 6)
39#define CLKF_SSI (1 << 7)
40#define CLKF_DSS (1 << 8)
41#define CLKF_HSOTGUSB (1 << 9)
42#define CLKF_WAIT (1 << 10)
43#define CLKF_NO_WAIT (1 << 11)
44#define CLKF_HSDIV (1 << 12)
45#define CLKF_CLKDM (1 << 13)
46
47/* DPLL flags */
48#define CLKF_LOW_POWER_STOP (1 << 5)
49#define CLKF_LOCK (1 << 6)
50#define CLKF_LOW_POWER_BYPASS (1 << 7)
51#define CLKF_PER (1 << 8)
52#define CLKF_CORE (1 << 9)
53#define CLKF_J_TYPE (1 << 10)
54
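Note that the gate flags and the DPLL flags deliberately reuse bits 5..10: a flags word is only ever interpreted against the descriptor's type, so bit 6 means CLKF_INTERFACE for a TI_CLK_GATE but CLKF_LOCK for a TI_CLK_DPLL. A minimal sketch of that type-keyed dispatch (the helper name is invented):

/* Illustrative only: bit 6 is type-dependent by design. */
static bool clk_wants_interface_ops(const struct ti_clk *setup)
{
	return setup->type == TI_CLK_GATE &&
	       (((struct ti_clk_gate *)setup->data)->flags & CLKF_INTERFACE);
}
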
55#define CLK(dev, con, ck) \
56 { \
57 .lk = { \
58 .dev_id = dev, \
59 .con_id = con, \
60 }, \
61 .clk = ck, \
62 }
63
64struct ti_clk {
65 const char *name;
66 const char *clkdm_name;
67 int type;
68 void *data;
69 struct ti_clk *patch;
70 struct clk *clk;
71};
72
73struct ti_clk_alias {
74 struct ti_clk *clk;
75 struct clk_lookup lk;
76 struct list_head link;
77};
78
79struct ti_clk_fixed {
80 u32 frequency;
81 u16 flags;
82};
83
84struct ti_clk_mux {
85 u8 bit_shift;
86 int num_parents;
87 u16 reg;
88 u8 module;
89 const char **parents;
90 u16 flags;
91};
92
93struct ti_clk_divider {
94 const char *parent;
95 u8 bit_shift;
96 u16 max_div;
97 u16 reg;
98 u8 module;
99 int *dividers;
100 int num_dividers;
101 u16 flags;
102};
103
104struct ti_clk_fixed_factor {
105 const char *parent;
106 u16 div;
107 u16 mult;
108 u16 flags;
109};
110
111struct ti_clk_gate {
112 const char *parent;
113 u8 bit_shift;
114 u16 reg;
115 u8 module;
116 u16 flags;
117};
118
119struct ti_clk_composite {
120 struct ti_clk_divider *divider;
121 struct ti_clk_mux *mux;
122 struct ti_clk_gate *gate;
123 u16 flags;
124};
125
126struct ti_clk_clkdm_gate {
127 const char *parent;
128 u16 flags;
129};
130
131struct ti_clk_dpll {
132 int num_parents;
133 u16 control_reg;
134 u16 idlest_reg;
135 u16 autoidle_reg;
136 u16 mult_div1_reg;
137 u8 module;
138 const char **parents;
139 u16 flags;
140 u8 modes;
141 u32 mult_mask;
142 u32 div1_mask;
143 u32 enable_mask;
144 u32 autoidle_mask;
145 u32 freqsel_mask;
146 u32 idlest_mask;
147 u32 dco_mask;
148 u32 sddiv_mask;
149 u16 max_multiplier;
150 u16 max_divider;
151 u8 min_divider;
152 u8 auto_recal_bit;
153 u8 recal_en_bit;
154 u8 recal_st_bit;
155};
156
157struct clk *ti_clk_register_gate(struct ti_clk *setup);
158struct clk *ti_clk_register_interface(struct ti_clk *setup);
159struct clk *ti_clk_register_mux(struct ti_clk *setup);
160struct clk *ti_clk_register_divider(struct ti_clk *setup);
161struct clk *ti_clk_register_composite(struct ti_clk *setup);
162struct clk *ti_clk_register_dpll(struct ti_clk *setup);
163
164struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup);
165struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup);
166struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup);
167
168void ti_clk_patch_legacy_clks(struct ti_clk **patch);
169struct clk *ti_clk_register_clk(struct ti_clk *setup);
170int ti_clk_register_legacy_clks(struct ti_clk_alias *clks);
171
172#endif
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
index 19d8980ba458..3654f61912eb 100644
--- a/drivers/clk/ti/composite.c
+++ b/drivers/clk/ti/composite.c
@@ -23,6 +23,8 @@
23#include <linux/clk/ti.h>
24#include <linux/list.h>
25
26#include "clock.h"
27
28#undef pr_fmt
29#define pr_fmt(fmt) "%s: " fmt, __func__
30
@@ -116,8 +118,46 @@ static inline struct clk_hw *_get_hw(struct clk_hw_omap_comp *clk, int idx)
118
119#define to_clk_hw_comp(_hw) container_of(_hw, struct clk_hw_omap_comp, hw)
120
-119static void __init ti_clk_register_composite(struct clk_hw *hw,
-120 struct device_node *node)
121#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
122struct clk *ti_clk_register_composite(struct ti_clk *setup)
123{
124 struct ti_clk_composite *comp;
125 struct clk_hw *gate;
126 struct clk_hw *mux;
127 struct clk_hw *div;
128 int num_parents = 1;
129 const char **parent_names = NULL;
130 struct clk *clk;
131
132 comp = setup->data;
133
134 div = ti_clk_build_component_div(comp->divider);
135 gate = ti_clk_build_component_gate(comp->gate);
136 mux = ti_clk_build_component_mux(comp->mux);
137
138 if (div)
139 parent_names = &comp->divider->parent;
140
141 if (gate)
142 parent_names = &comp->gate->parent;
143
144 if (mux) {
145 num_parents = comp->mux->num_parents;
146 parent_names = comp->mux->parents;
147 }
148
149 clk = clk_register_composite(NULL, setup->name,
150 parent_names, num_parents, mux,
151 &ti_clk_mux_ops, div,
152 &ti_composite_divider_ops, gate,
153 &ti_composite_gate_ops, 0);
154
155 return clk;
156}
157#endif
158
159static void __init _register_composite(struct clk_hw *hw,
160 struct device_node *node)
161{
162 struct clk *clk;
163 struct clk_hw_omap_comp *cclk = to_clk_hw_comp(hw);
@@ -136,7 +176,7 @@ static void __init ti_clk_register_composite(struct clk_hw *hw,
176 pr_debug("component %s not ready for %s, retry\n",
177 cclk->comp_nodes[i]->name, node->name);
178 if (!ti_clk_retry_init(node, hw,
-139 ti_clk_register_composite))
179 _register_composite))
180 return;
181
182 goto cleanup;
@@ -216,7 +256,7 @@ static void __init of_ti_composite_clk_setup(struct device_node *node)
256 for (i = 0; i < num_clks; i++)
257 cclk->comp_nodes[i] = _get_component_node(node, i);
258
-219 ti_clk_register_composite(&cclk->hw, node);
259 _register_composite(&cclk->hw, node);
260}
261CLK_OF_DECLARE(ti_composite_clock, "ti,composite-clock",
262 of_ti_composite_clk_setup);
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index bff2b5b8ff59..6211893c0980 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -21,6 +21,7 @@
21#include <linux/of.h>
22#include <linux/of_address.h>
23#include <linux/clk/ti.h>
24#include "clock.h"
25
26#undef pr_fmt
27#define pr_fmt(fmt) "%s: " fmt, __func__
@@ -301,6 +302,134 @@ static struct clk *_register_divider(struct device *dev, const char *name,
302}
303
304static struct clk_div_table *
305_get_div_table_from_setup(struct ti_clk_divider *setup, u8 *width)
306{
307 int valid_div = 0;
308 struct clk_div_table *table;
309 int i;
310 int div;
311 u32 val;
312 u8 flags;
313
314 if (!setup->num_dividers) {
315 /* Clk divider table not provided, determine min/max divs */
316 flags = setup->flags;
317
318 if (flags & CLKF_INDEX_STARTS_AT_ONE)
319 val = 1;
320 else
321 val = 0;
322
323 div = 1;
324
325 while (div < setup->max_div) {
326 if (flags & CLKF_INDEX_POWER_OF_TWO)
327 div <<= 1;
328 else
329 div++;
330 val++;
331 }
332
333 *width = fls(val);
334
335 return NULL;
336 }
337
338 for (i = 0; i < setup->num_dividers; i++)
339 if (setup->dividers[i])
340 valid_div++;
341
342 table = kzalloc(sizeof(*table) * (valid_div + 1), GFP_KERNEL);
343 if (!table)
344 return ERR_PTR(-ENOMEM);
345
346 valid_div = 0;
347 *width = 0;
348
349 for (i = 0; i < setup->num_dividers; i++)
350 if (setup->dividers[i]) {
351 table[valid_div].div = setup->dividers[i];
352 table[valid_div].val = i;
353 valid_div++;
354 *width = i;
355 }
356
357 *width = fls(*width);
358
359 return table;
360}
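
To make the no-table branch above concrete: for a CLKF_INDEX_POWER_OF_TWO divider with max_div = 64 (the sys_clkout2 case) the loop counts six doublings, and fls(6) = 3, so a 3-bit register field is reserved. A standalone re-run of that width computation:

#include <stdio.h>

static int fls_u32(unsigned int v)	/* userspace stand-in for kernel fls() */
{
	int bits = 0;

	while (v) {
		v >>= 1;
		bits++;
	}
	return bits;
}

int main(void)
{
	unsigned int val = 0, div = 1, max_div = 64;

	while (div < max_div) {	/* the CLKF_INDEX_POWER_OF_TWO walk above */
		div <<= 1;
		val++;
	}
	/* prints: last field value 6 -> width 3 bits */
	printf("last field value %u -> width %d bits\n", val, fls_u32(val));
	return 0;
}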
361
362struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
363{
364 struct clk_divider *div;
365 struct clk_omap_reg *reg;
366
367 if (!setup)
368 return NULL;
369
370 div = kzalloc(sizeof(*div), GFP_KERNEL);
371 if (!div)
372 return ERR_PTR(-ENOMEM);
373
374 reg = (struct clk_omap_reg *)&div->reg;
375 reg->index = setup->module;
376 reg->offset = setup->reg;
377
378 if (setup->flags & CLKF_INDEX_STARTS_AT_ONE)
379 div->flags |= CLK_DIVIDER_ONE_BASED;
380
381 if (setup->flags & CLKF_INDEX_POWER_OF_TWO)
382 div->flags |= CLK_DIVIDER_POWER_OF_TWO;
383
384 div->table = _get_div_table_from_setup(setup, &div->width);
385
386 div->shift = setup->bit_shift;
387
388 return &div->hw;
389}
390
391struct clk *ti_clk_register_divider(struct ti_clk *setup)
392{
393 struct ti_clk_divider *div;
394 struct clk_omap_reg *reg_setup;
395 u32 reg;
396 u8 width;
397 u32 flags = 0;
398 u8 div_flags = 0;
399 struct clk_div_table *table;
400 struct clk *clk;
401
402 div = setup->data;
403
404 reg_setup = (struct clk_omap_reg *)&reg;
405
406 reg_setup->index = div->module;
407 reg_setup->offset = div->reg;
408
409 if (div->flags & CLKF_INDEX_STARTS_AT_ONE)
410 div_flags |= CLK_DIVIDER_ONE_BASED;
411
412 if (div->flags & CLKF_INDEX_POWER_OF_TWO)
413 div_flags |= CLK_DIVIDER_POWER_OF_TWO;
414
415 if (div->flags & CLKF_SET_RATE_PARENT)
416 flags |= CLK_SET_RATE_PARENT;
417
418 table = _get_div_table_from_setup(div, &width);
419 if (IS_ERR(table))
420 return (struct clk *)table;
421
422 clk = _register_divider(NULL, setup->name, div->parent,
423 flags, (void __iomem *)reg, div->bit_shift,
424 width, div_flags, table, NULL);
425
426 if (IS_ERR(clk))
427 kfree(table);
428
429 return clk;
430}
431
432static struct clk_div_table *
433__init ti_clk_get_div_table(struct device_node *node)
434{
435 struct clk_div_table *table;
@@ -455,7 +584,8 @@ static void __init of_ti_divider_clk_setup(struct device_node *node)
584 goto cleanup;
585
586 clk = _register_divider(NULL, node->name, parent_name, flags, reg,
-458 shift, width, clk_divider_flags, table, NULL);
587 shift, width, clk_divider_flags, table,
588 NULL);
589
590 if (!IS_ERR(clk)) {
591 of_clk_add_provider(node, of_clk_src_simple_get, clk);
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 85ac0dd501de..81dc4698dc41 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -21,6 +21,7 @@
21#include <linux/of.h>
22#include <linux/of_address.h>
23#include <linux/clk/ti.h>
24#include "clock.h"
25
26#undef pr_fmt
27#define pr_fmt(fmt) "%s: " fmt, __func__
@@ -130,7 +131,7 @@ static const struct clk_ops dpll_x2_ck_ops = {
131};
132
133/**
-133 * ti_clk_register_dpll - low level registration of a DPLL clock
134 * _register_dpll - low level registration of a DPLL clock
135 * @hw: hardware clock definition for the clock
136 * @node: device node for the clock
137 *
@@ -138,8 +139,8 @@ static const struct clk_ops dpll_x2_ck_ops = {
139 * clk-bypass is missing), the clock is added to retry list and
140 * the initialization is retried on later stage.
141 */
-141static void __init ti_clk_register_dpll(struct clk_hw *hw,
-142 struct device_node *node)
142static void __init _register_dpll(struct clk_hw *hw,
143 struct device_node *node)
144{
145 struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
146 struct dpll_data *dd = clk_hw->dpll_data;
@@ -151,7 +152,7 @@ static void __init ti_clk_register_dpll(struct clk_hw *hw,
152 if (IS_ERR(dd->clk_ref) || IS_ERR(dd->clk_bypass)) {
153 pr_debug("clk-ref or clk-bypass missing for %s, retry later\n",
154 node->name);
-154 if (!ti_clk_retry_init(node, hw, ti_clk_register_dpll))
155 if (!ti_clk_retry_init(node, hw, _register_dpll))
156 return;
157
158 goto cleanup;
@@ -175,20 +176,118 @@ cleanup:
176 kfree(clk_hw);
177}
178
179#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
180void __iomem *_get_reg(u8 module, u16 offset)
181{
182 u32 reg;
183 struct clk_omap_reg *reg_setup;
184
185 reg_setup = (struct clk_omap_reg *)&reg;
186
187 reg_setup->index = module;
188 reg_setup->offset = offset;
189
190 return (void __iomem *)reg;
191}
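
_get_reg() does not return a mapped address: it lays a struct clk_omap_reg (module index plus register offset) over a 32-bit value and launders it through void __iomem *. The low-level accessors used with MEMMAP_ADDRESSING clocks are assumed to undo the same cast before touching hardware, roughly as below (clk_memmaps is a hypothetical name for the clock core's table of ioremapped module bases):

/* Illustrative decode side only, assuming the same packed layout. */
static u32 example_clk_readl(void __iomem *reg)
{
	struct clk_omap_reg *r = (struct clk_omap_reg *)&reg;

	return readl_relaxed(clk_memmaps[r->index] + r->offset);
}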
192
193struct clk *ti_clk_register_dpll(struct ti_clk *setup)
194{
195 struct clk_hw_omap *clk_hw;
196 struct clk_init_data init = { NULL };
197 struct dpll_data *dd;
198 struct clk *clk;
199 struct ti_clk_dpll *dpll;
200 const struct clk_ops *ops = &omap3_dpll_ck_ops;
201 struct clk *clk_ref;
202 struct clk *clk_bypass;
203
204 dpll = setup->data;
205
206 if (dpll->num_parents < 2)
207 return ERR_PTR(-EINVAL);
208
209 clk_ref = clk_get_sys(NULL, dpll->parents[0]);
210 clk_bypass = clk_get_sys(NULL, dpll->parents[1]);
211
212 if (IS_ERR_OR_NULL(clk_ref) || IS_ERR_OR_NULL(clk_bypass))
213 return ERR_PTR(-EAGAIN);
214
215 dd = kzalloc(sizeof(*dd), GFP_KERNEL);
216 clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
217 if (!dd || !clk_hw) {
218 clk = ERR_PTR(-ENOMEM);
219 goto cleanup;
220 }
221
222 clk_hw->dpll_data = dd;
223 clk_hw->ops = &clkhwops_omap3_dpll;
224 clk_hw->hw.init = &init;
225 clk_hw->flags = MEMMAP_ADDRESSING;
226
227 init.name = setup->name;
228 init.ops = ops;
229
230 init.num_parents = dpll->num_parents;
231 init.parent_names = dpll->parents;
232
233 dd->control_reg = _get_reg(dpll->module, dpll->control_reg);
234 dd->idlest_reg = _get_reg(dpll->module, dpll->idlest_reg);
235 dd->mult_div1_reg = _get_reg(dpll->module, dpll->mult_div1_reg);
236 dd->autoidle_reg = _get_reg(dpll->module, dpll->autoidle_reg);
237
238 dd->modes = dpll->modes;
239 dd->div1_mask = dpll->div1_mask;
240 dd->idlest_mask = dpll->idlest_mask;
241 dd->mult_mask = dpll->mult_mask;
242 dd->autoidle_mask = dpll->autoidle_mask;
243 dd->enable_mask = dpll->enable_mask;
244 dd->sddiv_mask = dpll->sddiv_mask;
245 dd->dco_mask = dpll->dco_mask;
246 dd->max_divider = dpll->max_divider;
247 dd->min_divider = dpll->min_divider;
248 dd->max_multiplier = dpll->max_multiplier;
249 dd->auto_recal_bit = dpll->auto_recal_bit;
250 dd->recal_en_bit = dpll->recal_en_bit;
251 dd->recal_st_bit = dpll->recal_st_bit;
252
253 dd->clk_ref = clk_ref;
254 dd->clk_bypass = clk_bypass;
255
256 if (dpll->flags & CLKF_CORE)
257 ops = &omap3_dpll_core_ck_ops;
258
259 if (dpll->flags & CLKF_PER)
260 ops = &omap3_dpll_per_ck_ops;
261
262 if (dpll->flags & CLKF_J_TYPE)
263 dd->flags |= DPLL_J_TYPE;
264
265 clk = clk_register(NULL, &clk_hw->hw);
266
267 if (!IS_ERR(clk))
268 return clk;
269
270cleanup:
271 kfree(dd);
272 kfree(clk_hw);
273 return clk;
274}
275#endif
276
277#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
278 defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
279 defined(CONFIG_SOC_AM43XX)
280/**
-182 * ti_clk_register_dpll_x2 - Registers a DPLLx2 clock
281 * _register_dpll_x2 - Registers a DPLLx2 clock
282 * @node: device node for this clock
283 * @ops: clk_ops for this clock
284 * @hw_ops: clk_hw_ops for this clock
285 *
286 * Initializes a DPLL x 2 clock from device tree data.
287 */
-189static void ti_clk_register_dpll_x2(struct device_node *node,
288static void _register_dpll_x2(struct device_node *node,
289 const struct clk_ops *ops,
290 const struct clk_hw_omap_ops *hw_ops)
291{
292 struct clk *clk;
293 struct clk_init_data init = { NULL };
@@ -318,7 +417,7 @@ static void __init of_ti_dpll_setup(struct device_node *node,
417 if (dpll_mode)
418 dd->modes = dpll_mode;
419
-321 ti_clk_register_dpll(&clk_hw->hw, node);
420 _register_dpll(&clk_hw->hw, node);
421 return;
422
423cleanup:
@@ -332,7 +431,7 @@ cleanup:
431 defined(CONFIG_SOC_DRA7XX)
432static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node)
433{
-335 ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
434 _register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
435}
436CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
437 of_ti_omap4_dpll_x2_setup);
@@ -341,7 +440,7 @@ CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
440#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
441static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
442{
-344 ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
443 _register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
444}
445CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock",
446 of_ti_am3_dpll_x2_setup);
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
new file mode 100644
index 000000000000..6ef89639a9f6
--- /dev/null
+++ b/drivers/clk/ti/fapll.c
@@ -0,0 +1,410 @@
1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License as
4 * published by the Free Software Foundation version 2.
5 *
6 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
7 * kind, whether express or implied; without even the implied warranty
8 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 */
11
12#include <linux/clk-provider.h>
13#include <linux/delay.h>
14#include <linux/slab.h>
15#include <linux/err.h>
16#include <linux/of.h>
17#include <linux/of_address.h>
18#include <linux/clk/ti.h>
19#include <asm/div64.h>
20
21/* FAPLL Control Register PLL_CTRL */
22#define FAPLL_MAIN_LOCK BIT(7)
23#define FAPLL_MAIN_PLLEN BIT(3)
24#define FAPLL_MAIN_BP BIT(2)
25#define FAPLL_MAIN_LOC_CTL BIT(0)
26
27/* FAPLL powerdown register PWD */
28#define FAPLL_PWD_OFFSET 4
29
30#define MAX_FAPLL_OUTPUTS 7
31#define FAPLL_MAX_RETRIES 1000
32
33#define to_fapll(_hw) container_of(_hw, struct fapll_data, hw)
34#define to_synth(_hw) container_of(_hw, struct fapll_synth, hw)
35
36/* The bypass bit is inverted on the ddr_pll.. */
37#define fapll_is_ddr_pll(va) (((u32)(va) & 0xffff) == 0x0440)
38
39/*
40 * The audio_pll_clk1 input is hardwired to the 27 MHz bypass clock,
41 * and the audio_pll_clk1 synthesizer is hardwired to a 32.768 kHz output.
42 */
43#define is_ddr_pll_clk1(va) (((u32)(va) & 0xffff) == 0x044c)
44#define is_audio_pll_clk1(va) (((u32)(va) & 0xffff) == 0x04a8)
45
46/* Synthesizer divider register */
47#define SYNTH_LDMDIV1 BIT(8)
48
49/* Synthesizer frequency register */
50#define SYNTH_LDFREQ BIT(31)
51
52struct fapll_data {
53 struct clk_hw hw;
54 void __iomem *base;
55 const char *name;
56 struct clk *clk_ref;
57 struct clk *clk_bypass;
58 struct clk_onecell_data outputs;
59 bool bypass_bit_inverted;
60};
61
62struct fapll_synth {
63 struct clk_hw hw;
64 struct fapll_data *fd;
65 int index;
66 void __iomem *freq;
67 void __iomem *div;
68 const char *name;
69 struct clk *clk_pll;
70};
71
72static bool ti_fapll_clock_is_bypass(struct fapll_data *fd)
73{
74 u32 v = readl_relaxed(fd->base);
75
76 if (fd->bypass_bit_inverted)
77 return !(v & FAPLL_MAIN_BP);
78 else
79 return !!(v & FAPLL_MAIN_BP);
80}
81
82static int ti_fapll_enable(struct clk_hw *hw)
83{
84 struct fapll_data *fd = to_fapll(hw);
85 u32 v = readl_relaxed(fd->base);
86
87 v |= FAPLL_MAIN_PLLEN; /* already a BIT() mask; (1 << FAPLL_MAIN_PLLEN) would hit bit 8 */
88 writel_relaxed(v, fd->base);
89
90 return 0;
91}
92
93static void ti_fapll_disable(struct clk_hw *hw)
94{
95 struct fapll_data *fd = to_fapll(hw);
96 u32 v = readl_relaxed(fd->base);
97
98 v &= ~FAPLL_MAIN_PLLEN;
99 writel_relaxed(v, fd->base);
100}
101
102static int ti_fapll_is_enabled(struct clk_hw *hw)
103{
104 struct fapll_data *fd = to_fapll(hw);
105 u32 v = readl_relaxed(fd->base);
106
107 return v & FAPLL_MAIN_PLLEN;
108}
109
110static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw,
111 unsigned long parent_rate)
112{
113 struct fapll_data *fd = to_fapll(hw);
114 u32 fapll_n, fapll_p, v;
115 long long rate;
116
117 if (ti_fapll_clock_is_bypass(fd))
118 return parent_rate;
119
120 rate = parent_rate;
121
122 /* PLL pre-divider is P and multiplier is N */
123 v = readl_relaxed(fd->base);
124 fapll_p = (v >> 8) & 0xff;
125 if (fapll_p)
126 do_div(rate, fapll_p);
127 fapll_n = v >> 16;
128 if (fapll_n)
129 rate *= fapll_n;
130
131 return rate;
132}
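
Worked example of the recalc above, with P in bits 8..15 and N in bits 16..31 as decoded by the code: a 27 MHz reference with P = 1 and N = 32 gives 864 MHz. The input rate and register value here are assumptions for illustration, not values from the TRM:

#include <stdio.h>

int main(void)
{
	unsigned long long rate = 27000000ULL;	/* assumed 27 MHz ref */
	unsigned int v = (32 << 16) | (1 << 8);	/* N = 32, P = 1 */
	unsigned int p = (v >> 8) & 0xff, n = v >> 16;

	if (p)
		rate /= p;
	if (n)
		rate *= n;
	printf("%llu\n", rate);	/* 864000000 */
	return 0;
}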
133
134static u8 ti_fapll_get_parent(struct clk_hw *hw)
135{
136 struct fapll_data *fd = to_fapll(hw);
137
138 if (ti_fapll_clock_is_bypass(fd))
139 return 1;
140
141 return 0;
142}
143
144static struct clk_ops ti_fapll_ops = {
145 .enable = ti_fapll_enable,
146 .disable = ti_fapll_disable,
147 .is_enabled = ti_fapll_is_enabled,
148 .recalc_rate = ti_fapll_recalc_rate,
149 .get_parent = ti_fapll_get_parent,
150};
151
152static int ti_fapll_synth_enable(struct clk_hw *hw)
153{
154 struct fapll_synth *synth = to_synth(hw);
155 u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
156
157 v &= ~(1 << synth->index);
158 writel_relaxed(v, synth->fd->base + FAPLL_PWD_OFFSET);
159
160 return 0;
161}
162
163static void ti_fapll_synth_disable(struct clk_hw *hw)
164{
165 struct fapll_synth *synth = to_synth(hw);
166 u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
167
168 v |= 1 << synth->index;
169 writel_relaxed(v, synth->fd->base + FAPLL_PWD_OFFSET);
170}
171
172static int ti_fapll_synth_is_enabled(struct clk_hw *hw)
173{
174 struct fapll_synth *synth = to_synth(hw);
175 u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
176
177 return !(v & (1 << synth->index));
178}
179
180/*
181 * See the dm816x TRM chapter 1.10.3 "Flying Adder PLL" for more info.
182 */
183static unsigned long ti_fapll_synth_recalc_rate(struct clk_hw *hw,
184 unsigned long parent_rate)
185{
186 struct fapll_synth *synth = to_synth(hw);
187 u32 synth_div_m;
188 long long rate;
189
190 /* The audio_pll_clk1 is hardwired to produce a 32.768 kHz clock */
191 if (!synth->div)
192 return 32768;
193
194 /*
195 * PLL in bypass sets the synths in bypass mode too. The PLL rate
196 * can also be set to 27 MHz, so we can't use parent_rate to
197 * check for bypass mode.
198 */
199 if (ti_fapll_clock_is_bypass(synth->fd))
200 return parent_rate;
201
202 rate = parent_rate;
203
204 /*
205 * Synth frequency integer and fractional divider.
206 * Note that the phase output K is 8, so the result needs
207 * to be multiplied by 8.
208 */
209 if (synth->freq) {
210 u32 v, synth_int_div, synth_frac_div, synth_div_freq;
211
212 v = readl_relaxed(synth->freq);
213 synth_int_div = (v >> 24) & 0xf;
214 synth_frac_div = v & 0xffffff;
215 synth_div_freq = (synth_int_div * 10000000) + synth_frac_div;
216 rate *= 10000000;
217 do_div(rate, synth_div_freq);
218 rate *= 8;
219 }
220
221 /* Synth post-divider M */
222 synth_div_m = readl_relaxed(synth->div) & 0xff;
223 do_div(rate, synth_div_m);
224
225 return rate;
226}
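
Plugging assumed numbers into the synthesizer math above: with an 864 MHz post-PLL rate, an integer divider of 2, a zero fractional part and post-divider M = 8, the output is 864 MHz * 10^7 / (2 * 10^7) * 8 / 8 = 432 MHz. A standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long long rate = 864000000ULL;	/* assumed FAPLL rate */
	unsigned int synth_int_div = 2, synth_frac_div = 0, synth_div_m = 8;
	unsigned long long synth_div_freq =
		(unsigned long long)synth_int_div * 10000000 + synth_frac_div;

	rate *= 10000000;
	rate /= synth_div_freq;	/* stands in for do_div() */
	rate *= 8;		/* fixed phase output K */
	rate /= synth_div_m;
	printf("%llu\n", rate);	/* 432000000 */
	return 0;
}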
227
228static struct clk_ops ti_fapll_synt_ops = {
229 .enable = ti_fapll_synth_enable,
230 .disable = ti_fapll_synth_disable,
231 .is_enabled = ti_fapll_synth_is_enabled,
232 .recalc_rate = ti_fapll_synth_recalc_rate,
233};
234
235static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd,
236 void __iomem *freq,
237 void __iomem *div,
238 int index,
239 const char *name,
240 const char *parent,
241 struct clk *pll_clk)
242{
243 struct clk_init_data *init;
244 struct fapll_synth *synth;
245
246 init = kzalloc(sizeof(*init), GFP_KERNEL);
247 if (!init)
248 return ERR_PTR(-ENOMEM);
249
250 init->ops = &ti_fapll_synt_ops;
251 init->name = name;
252 init->parent_names = &parent;
253 init->num_parents = 1;
254
255 synth = kzalloc(sizeof(*synth), GFP_KERNEL);
256 if (!synth)
257 goto free;
258
259 synth->fd = fd;
260 synth->index = index;
261 synth->freq = freq;
262 synth->div = div;
263 synth->name = name;
264 synth->hw.init = init;
265 synth->clk_pll = pll_clk;
266
267 return clk_register(NULL, &synth->hw);
268
269free:
270 kfree(synth);
271 kfree(init);
272
273 return ERR_PTR(-ENOMEM);
274}
275
276static void __init ti_fapll_setup(struct device_node *node)
277{
278 struct fapll_data *fd;
279 struct clk_init_data *init = NULL;
280 const char *parent_name[2];
281 struct clk *pll_clk;
282 int i;
283
284 fd = kzalloc(sizeof(*fd), GFP_KERNEL);
285 if (!fd)
286 return;
287
288 fd->outputs.clks = kzalloc(sizeof(struct clk *) *
289 MAX_FAPLL_OUTPUTS + 1,
290 GFP_KERNEL);
291 if (!fd->outputs.clks)
292 goto free;
293
294 init = kzalloc(sizeof(*init), GFP_KERNEL);
295 if (!init)
296 goto free;
297
298 init->ops = &ti_fapll_ops;
299 init->name = node->name;
300
301 init->num_parents = of_clk_get_parent_count(node);
302 if (init->num_parents != 2) {
303 pr_err("%s must have two parents\n", node->name);
304 goto free;
305 }
306
307 parent_name[0] = of_clk_get_parent_name(node, 0);
308 parent_name[1] = of_clk_get_parent_name(node, 1);
309 init->parent_names = parent_name;
310
311 fd->clk_ref = of_clk_get(node, 0);
312 if (IS_ERR(fd->clk_ref)) {
313 pr_err("%s could not get clk_ref\n", node->name);
314 goto free;
315 }
316
317 fd->clk_bypass = of_clk_get(node, 1);
318 if (IS_ERR(fd->clk_bypass)) {
319 pr_err("%s could not get clk_bypass\n", node->name);
320 goto free;
321 }
322
323 fd->base = of_iomap(node, 0);
324 if (!fd->base) {
325 pr_err("%s could not get IO base\n", node->name);
326 goto free;
327 }
328
329 if (fapll_is_ddr_pll(fd->base))
330 fd->bypass_bit_inverted = true;
331
332 fd->name = node->name;
333 fd->hw.init = init;
334
335 /* Register the parent PLL */
336 pll_clk = clk_register(NULL, &fd->hw);
337 if (IS_ERR(pll_clk))
338 goto unmap;
339
340 fd->outputs.clks[0] = pll_clk;
341 fd->outputs.clk_num++;
342
343 /*
344 * Set up the child synthesizers starting at index 1 as the
345 * PLL output is at index 0. We need to check the clock-indices
346 * for numbering in case there are holes in the synth mapping,
347 * and then probe the synth register to see if it has a FREQ
348 * register available.
349 */
350 for (i = 0; i < MAX_FAPLL_OUTPUTS; i++) {
351 const char *output_name;
352 void __iomem *freq, *div;
353 struct clk *synth_clk;
354 int output_instance;
355 u32 v;
356
357 if (of_property_read_string_index(node, "clock-output-names",
358 i, &output_name))
359 continue;
360
361 if (of_property_read_u32_index(node, "clock-indices", i,
362 &output_instance))
363 output_instance = i;
364
365 freq = fd->base + (output_instance * 8);
366 div = freq + 4;
367
368 /* Check for hardwired audio_pll_clk1 */
369 if (is_audio_pll_clk1(freq)) {
370 freq = NULL;
371 div = NULL;
372 } else {
373 /* Does the synthesizer have a FREQ register? */
374 v = readl_relaxed(freq);
375 if (!v)
376 freq = NULL;
377 }
378 synth_clk = ti_fapll_synth_setup(fd, freq, div, output_instance,
379 output_name, node->name,
380 pll_clk);
381 if (IS_ERR(synth_clk))
382 continue;
383
384 fd->outputs.clks[output_instance] = synth_clk;
385 fd->outputs.clk_num++;
386
387 clk_register_clkdev(synth_clk, output_name, NULL);
388 }
389
390 /* Register the child synthesizers as the FAPLL outputs */
391 of_clk_add_provider(node, of_clk_src_onecell_get, &fd->outputs);
392 /* Add clock alias for the outputs */
393
394 kfree(init);
395
396 return;
397
398unmap:
399 iounmap(fd->base);
400free:
401 if (fd->clk_bypass)
402 clk_put(fd->clk_bypass);
403 if (fd->clk_ref)
404 clk_put(fd->clk_ref);
405 kfree(fd->outputs.clks);
406 kfree(fd);
407 kfree(init);
408}
409
410CLK_OF_DECLARE(ti_fapll_clock, "ti,dm816-fapll-clock", ti_fapll_setup);
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
index b326d2797feb..d493307b73f4 100644
--- a/drivers/clk/ti/gate.c
+++ b/drivers/clk/ti/gate.c
@@ -22,6 +22,8 @@
22#include <linux/of_address.h>
23#include <linux/clk/ti.h>
24
25#include "clock.h"
26
27#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
28
29#undef pr_fmt
@@ -90,63 +92,164 @@ static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
92 return ret;
93}
94
-93static void __init _of_ti_gate_clk_setup(struct device_node *node,
-94 const struct clk_ops *ops,
-95 const struct clk_hw_omap_ops *hw_ops)
95static struct clk *_register_gate(struct device *dev, const char *name,
96 const char *parent_name, unsigned long flags,
97 void __iomem *reg, u8 bit_idx,
98 u8 clk_gate_flags, const struct clk_ops *ops,
99 const struct clk_hw_omap_ops *hw_ops)
100{
-97 struct clk *clk;
101 struct clk_init_data init = { NULL };
102 struct clk_hw_omap *clk_hw;
-100 const char *clk_name = node->name;
-101 const char *parent_name;
-102 u32 val;
103 struct clk *clk;
104
105 clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
106 if (!clk_hw)
-106 return;
107 return ERR_PTR(-ENOMEM);
108
109 clk_hw->hw.init = &init;
110
-110 init.name = clk_name;
111 init.name = name;
112 init.ops = ops;
113
-113 if (ops != &omap_gate_clkdm_clk_ops) {
-114 clk_hw->enable_reg = ti_clk_get_reg_addr(node, 0);
-115 if (!clk_hw->enable_reg)
-116 goto cleanup;
114 clk_hw->enable_reg = reg;
115 clk_hw->enable_bit = bit_idx;
116 clk_hw->ops = hw_ops;
117
-118 if (!of_property_read_u32(node, "ti,bit-shift", &val))
-119 clk_hw->enable_bit = val;
118 clk_hw->flags = MEMMAP_ADDRESSING | clk_gate_flags;
119
120 init.parent_names = &parent_name;
121 init.num_parents = 1;
122
123 init.flags = flags;
124
125 clk = clk_register(NULL, &clk_hw->hw);
126
127 if (IS_ERR(clk))
128 kfree(clk_hw);
129
130 return clk;
131}
132
133#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
134struct clk *ti_clk_register_gate(struct ti_clk *setup)
135{
136 const struct clk_ops *ops = &omap_gate_clk_ops;
137 const struct clk_hw_omap_ops *hw_ops = NULL;
138 u32 reg;
139 struct clk_omap_reg *reg_setup;
140 u32 flags = 0;
141 u8 clk_gate_flags = 0;
142 struct ti_clk_gate *gate;
143
144 gate = setup->data;
145
146 if (gate->flags & CLKF_INTERFACE)
147 return ti_clk_register_interface(setup);
148
149 reg_setup = (struct clk_omap_reg *)&reg;
150
151 if (gate->flags & CLKF_SET_RATE_PARENT)
152 flags |= CLK_SET_RATE_PARENT;
153
154 if (gate->flags & CLKF_SET_BIT_TO_DISABLE)
155 clk_gate_flags |= INVERT_ENABLE;
156
157 if (gate->flags & CLKF_HSDIV) {
158 ops = &omap_gate_clk_hsdiv_restore_ops;
159 hw_ops = &clkhwops_wait;
160 }
161
-122 clk_hw->ops = hw_ops;
162 if (gate->flags & CLKF_DSS)
163 hw_ops = &clkhwops_omap3430es2_dss_usbhost_wait;
164
165 if (gate->flags & CLKF_WAIT)
166 hw_ops = &clkhwops_wait;
167
168 if (gate->flags & CLKF_CLKDM)
169 ops = &omap_gate_clkdm_clk_ops;
170
171 if (gate->flags & CLKF_AM35XX)
172 hw_ops = &clkhwops_am35xx_ipss_module_wait;
123 173
124 clk_hw->flags = MEMMAP_ADDRESSING; 174 reg_setup->index = gate->module;
175 reg_setup->offset = gate->reg;
176
177 return _register_gate(NULL, setup->name, gate->parent, flags,
178 (void __iomem *)reg, gate->bit_shift,
179 clk_gate_flags, ops, hw_ops);
180}
181
182struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup)
183{
184 struct clk_hw_omap *gate;
185 struct clk_omap_reg *reg;
186 const struct clk_hw_omap_ops *ops = &clkhwops_wait;
187
188 if (!setup)
189 return NULL;
190
191 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
192 if (!gate)
193 return ERR_PTR(-ENOMEM);
194
195 reg = (struct clk_omap_reg *)&gate->enable_reg;
196 reg->index = setup->module;
197 reg->offset = setup->reg;
198
199 gate->enable_bit = setup->bit_shift;
200
201 if (setup->flags & CLKF_NO_WAIT)
202 ops = NULL;
203
204 if (setup->flags & CLKF_INTERFACE)
205 ops = &clkhwops_iclk_wait;
206
207 gate->ops = ops;
208 gate->flags = MEMMAP_ADDRESSING;
209
210 return &gate->hw;
211}
212#endif
213
214static void __init _of_ti_gate_clk_setup(struct device_node *node,
215 const struct clk_ops *ops,
216 const struct clk_hw_omap_ops *hw_ops)
217{
218 struct clk *clk;
219 const char *parent_name;
220 void __iomem *reg = NULL;
221 u8 enable_bit = 0;
222 u32 val;
223 u32 flags = 0;
224 u8 clk_gate_flags = 0;
225
226 if (ops != &omap_gate_clkdm_clk_ops) {
227 reg = ti_clk_get_reg_addr(node, 0);
228 if (!reg)
229 return;
230
231 if (!of_property_read_u32(node, "ti,bit-shift", &val))
232 enable_bit = val;
233 }
125 234
126 if (of_clk_get_parent_count(node) != 1) { 235 if (of_clk_get_parent_count(node) != 1) {
127 pr_err("%s must have 1 parent\n", clk_name); 236 pr_err("%s must have 1 parent\n", node->name);
128 goto cleanup; 237 return;
129 } 238 }
130 239
131 parent_name = of_clk_get_parent_name(node, 0); 240 parent_name = of_clk_get_parent_name(node, 0);
132 init.parent_names = &parent_name;
133 init.num_parents = 1;
134 241
135 if (of_property_read_bool(node, "ti,set-rate-parent")) 242 if (of_property_read_bool(node, "ti,set-rate-parent"))
136 init.flags |= CLK_SET_RATE_PARENT; 243 flags |= CLK_SET_RATE_PARENT;
137 244
138 if (of_property_read_bool(node, "ti,set-bit-to-disable")) 245 if (of_property_read_bool(node, "ti,set-bit-to-disable"))
139 clk_hw->flags |= INVERT_ENABLE; 246 clk_gate_flags |= INVERT_ENABLE;
140 247
141 clk = clk_register(NULL, &clk_hw->hw); 248 clk = _register_gate(NULL, node->name, parent_name, flags, reg,
249 enable_bit, clk_gate_flags, ops, hw_ops);
142 250
143 if (!IS_ERR(clk)) { 251 if (!IS_ERR(clk))
144 of_clk_add_provider(node, of_clk_src_simple_get, clk); 252 of_clk_add_provider(node, of_clk_src_simple_get, clk);
145 return;
146 }
147
148cleanup:
149 kfree(clk_hw);
150} 253}
151 254
152static void __init 255static void __init
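
The gate.c hunks above pull the actual clk registration out of the DT setup path into a shared _register_gate() helper, so the legacy ti_clk path added under CONFIG_ARCH_OMAP3/CONFIG_ATAGS can reuse it. The load-bearing idiom is that the helper owns its allocation until clk_register() succeeds and reports failure via ERR_PTR(), leaving every caller with a single IS_ERR() check. A minimal sketch of that idiom — example_* names are illustrative placeholders, not the driver's own:

    #include <linux/clk-provider.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    /* Hypothetical helper: register a clk, clean up on failure. */
    static struct clk *example_register(const char *name,
                                        const struct clk_ops *ops)
    {
            struct clk_init_data init = { .name = name, .ops = ops };
            struct clk_hw *hw;
            struct clk *clk;

            hw = kzalloc(sizeof(*hw), GFP_KERNEL);
            if (!hw)
                    return ERR_PTR(-ENOMEM);

            hw->init = &init;

            clk = clk_register(NULL, hw);
            if (IS_ERR(clk))
                    kfree(hw);      /* helper frees what it allocated */

            return clk;             /* callers need only IS_ERR() */
    }
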
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
index 9c3e8c4aaa40..265d91f071c5 100644
--- a/drivers/clk/ti/interface.c
+++ b/drivers/clk/ti/interface.c
@@ -20,6 +20,7 @@
20#include <linux/of.h> 20#include <linux/of.h>
21#include <linux/of_address.h> 21#include <linux/of_address.h>
22#include <linux/clk/ti.h> 22#include <linux/clk/ti.h>
23#include "clock.h"
23 24
24#undef pr_fmt 25#undef pr_fmt
25#define pr_fmt(fmt) "%s: " fmt, __func__ 26#define pr_fmt(fmt) "%s: " fmt, __func__
@@ -31,53 +32,102 @@ static const struct clk_ops ti_interface_clk_ops = {
31 .is_enabled = &omap2_dflt_clk_is_enabled, 32 .is_enabled = &omap2_dflt_clk_is_enabled,
32}; 33};
33 34
34static void __init _of_ti_interface_clk_setup(struct device_node *node, 35static struct clk *_register_interface(struct device *dev, const char *name,
35 const struct clk_hw_omap_ops *ops) 36 const char *parent_name,
37 void __iomem *reg, u8 bit_idx,
38 const struct clk_hw_omap_ops *ops)
36{ 39{
37 struct clk *clk;
38 struct clk_init_data init = { NULL }; 40 struct clk_init_data init = { NULL };
39 struct clk_hw_omap *clk_hw; 41 struct clk_hw_omap *clk_hw;
40 const char *parent_name; 42 struct clk *clk;
41 u32 val;
42 43
43 clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL); 44 clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
44 if (!clk_hw) 45 if (!clk_hw)
45 return; 46 return ERR_PTR(-ENOMEM);
46 47
47 clk_hw->hw.init = &init; 48 clk_hw->hw.init = &init;
48 clk_hw->ops = ops; 49 clk_hw->ops = ops;
49 clk_hw->flags = MEMMAP_ADDRESSING; 50 clk_hw->flags = MEMMAP_ADDRESSING;
51 clk_hw->enable_reg = reg;
52 clk_hw->enable_bit = bit_idx;
50 53
51 clk_hw->enable_reg = ti_clk_get_reg_addr(node, 0); 54 init.name = name;
52 if (!clk_hw->enable_reg)
53 goto cleanup;
54
55 if (!of_property_read_u32(node, "ti,bit-shift", &val))
56 clk_hw->enable_bit = val;
57
58 init.name = node->name;
59 init.ops = &ti_interface_clk_ops; 55 init.ops = &ti_interface_clk_ops;
60 init.flags = 0; 56 init.flags = 0;
61 57
62 parent_name = of_clk_get_parent_name(node, 0);
63 if (!parent_name) {
64 pr_err("%s must have a parent\n", node->name);
65 goto cleanup;
66 }
67
68 init.num_parents = 1; 58 init.num_parents = 1;
69 init.parent_names = &parent_name; 59 init.parent_names = &parent_name;
70 60
71 clk = clk_register(NULL, &clk_hw->hw); 61 clk = clk_register(NULL, &clk_hw->hw);
72 62
73 if (!IS_ERR(clk)) { 63 if (IS_ERR(clk))
74 of_clk_add_provider(node, of_clk_src_simple_get, clk); 64 kfree(clk_hw);
65 else
75 omap2_init_clk_hw_omap_clocks(clk); 66 omap2_init_clk_hw_omap_clocks(clk);
67
68 return clk;
69}
70
71#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
72struct clk *ti_clk_register_interface(struct ti_clk *setup)
73{
74 const struct clk_hw_omap_ops *ops = &clkhwops_iclk_wait;
75 u32 reg;
76 struct clk_omap_reg *reg_setup;
77 struct ti_clk_gate *gate;
78
79 gate = setup->data;
80 reg_setup = (struct clk_omap_reg *)&reg;
81 reg_setup->index = gate->module;
82 reg_setup->offset = gate->reg;
83
84 if (gate->flags & CLKF_NO_WAIT)
85 ops = &clkhwops_iclk;
86
87 if (gate->flags & CLKF_HSOTGUSB)
88 ops = &clkhwops_omap3430es2_iclk_hsotgusb_wait;
89
90 if (gate->flags & CLKF_DSS)
91 ops = &clkhwops_omap3430es2_iclk_dss_usbhost_wait;
92
93 if (gate->flags & CLKF_SSI)
94 ops = &clkhwops_omap3430es2_iclk_ssi_wait;
95
96 if (gate->flags & CLKF_AM35XX)
97 ops = &clkhwops_am35xx_ipss_wait;
98
99 return _register_interface(NULL, setup->name, gate->parent,
100 (void __iomem *)reg, gate->bit_shift, ops);
101}
102#endif
103
104static void __init _of_ti_interface_clk_setup(struct device_node *node,
105 const struct clk_hw_omap_ops *ops)
106{
107 struct clk *clk;
108 const char *parent_name;
109 void __iomem *reg;
110 u8 enable_bit = 0;
111 u32 val;
112
113 reg = ti_clk_get_reg_addr(node, 0);
114 if (!reg)
115 return;
116
117 if (!of_property_read_u32(node, "ti,bit-shift", &val))
118 enable_bit = val;
119
120 parent_name = of_clk_get_parent_name(node, 0);
121 if (!parent_name) {
122 pr_err("%s must have a parent\n", node->name);
76 return; 123 return;
77 } 124 }
78 125
79cleanup: 126 clk = _register_interface(NULL, node->name, parent_name, reg,
80 kfree(clk_hw); 127 enable_bit, ops);
128
129 if (!IS_ERR(clk))
130 of_clk_add_provider(node, of_clk_src_simple_get, clk);
81} 131}
82 132
83static void __init of_ti_interface_clk_setup(struct device_node *node) 133static void __init of_ti_interface_clk_setup(struct device_node *node)
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index e9d650e51287..728e253606bc 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -21,6 +21,7 @@
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/of_address.h> 22#include <linux/of_address.h>
23#include <linux/clk/ti.h> 23#include <linux/clk/ti.h>
24#include "clock.h"
24 25
25#undef pr_fmt 26#undef pr_fmt
26#define pr_fmt(fmt) "%s: " fmt, __func__ 27#define pr_fmt(fmt) "%s: " fmt, __func__
@@ -144,6 +145,39 @@ static struct clk *_register_mux(struct device *dev, const char *name,
144 return clk; 145 return clk;
145} 146}
146 147
148struct clk *ti_clk_register_mux(struct ti_clk *setup)
149{
150 struct ti_clk_mux *mux;
151 u32 flags;
152 u8 mux_flags = 0;
153 struct clk_omap_reg *reg_setup;
154 u32 reg;
155 u32 mask;
156
157 reg_setup = (struct clk_omap_reg *)&reg;
158
159 mux = setup->data;
160 flags = CLK_SET_RATE_NO_REPARENT;
161
162 mask = mux->num_parents;
163 if (!(mux->flags & CLKF_INDEX_STARTS_AT_ONE))
164 mask--;
165
166 mask = (1 << fls(mask)) - 1;
167 reg_setup->index = mux->module;
168 reg_setup->offset = mux->reg;
169
170 if (mux->flags & CLKF_INDEX_STARTS_AT_ONE)
171 mux_flags |= CLK_MUX_INDEX_ONE;
172
173 if (mux->flags & CLKF_SET_RATE_PARENT)
174 flags |= CLK_SET_RATE_PARENT;
175
176 return _register_mux(NULL, setup->name, mux->parents, mux->num_parents,
177 flags, (void __iomem *)reg, mux->bit_shift, mask,
178 mux_flags, NULL, NULL);
179}
180
147/** 181/**
148 * of_mux_clk_setup - Setup function for simple mux rate clock 182 * of_mux_clk_setup - Setup function for simple mux rate clock
149 * @node: DT node for the clock 183 * @node: DT node for the clock
@@ -194,8 +228,9 @@ static void of_mux_clk_setup(struct device_node *node)
194 228
195 mask = (1 << fls(mask)) - 1; 229 mask = (1 << fls(mask)) - 1;
196 230
197 clk = _register_mux(NULL, node->name, parent_names, num_parents, flags, 231 clk = _register_mux(NULL, node->name, parent_names, num_parents,
198 reg, shift, mask, clk_mux_flags, NULL, NULL); 232 flags, reg, shift, mask, clk_mux_flags, NULL,
233 NULL);
199 234
200 if (!IS_ERR(clk)) 235 if (!IS_ERR(clk))
201 of_clk_add_provider(node, of_clk_src_simple_get, clk); 236 of_clk_add_provider(node, of_clk_src_simple_get, clk);
@@ -205,6 +240,37 @@ cleanup:
205} 240}
206CLK_OF_DECLARE(mux_clk, "ti,mux-clock", of_mux_clk_setup); 241CLK_OF_DECLARE(mux_clk, "ti,mux-clock", of_mux_clk_setup);
207 242
243struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup)
244{
245 struct clk_mux *mux;
246 struct clk_omap_reg *reg;
247 int num_parents;
248
249 if (!setup)
250 return NULL;
251
252 mux = kzalloc(sizeof(*mux), GFP_KERNEL);
253 if (!mux)
254 return ERR_PTR(-ENOMEM);
255
256 reg = (struct clk_omap_reg *)&mux->reg;
257
258 mux->shift = setup->bit_shift;
259
260 reg->index = setup->module;
261 reg->offset = setup->reg;
262
263 if (setup->flags & CLKF_INDEX_STARTS_AT_ONE)
264 mux->flags |= CLK_MUX_INDEX_ONE;
265
266 num_parents = setup->num_parents;
267
268 mux->mask = num_parents - 1;
269 mux->mask = (1 << fls(mux->mask)) - 1;
270
271 return &mux->hw;
272}
273
208static void __init of_ti_composite_mux_clk_setup(struct device_node *node) 274static void __init of_ti_composite_mux_clk_setup(struct device_node *node)
209{ 275{
210 struct clk_mux *mux; 276 struct clk_mux *mux;
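
Both ti_clk_register_mux() and ti_clk_build_component_mux() above derive the register-field mask by rounding the parent count up to a full power-of-two bitmask, with CLK_MUX_INDEX_ONE shifting the selector range when indexing starts at one. A small sketch of just that arithmetic, under a hypothetical example_ helper name:

    #include <linux/bitops.h>
    #include <linux/types.h>

    static u32 example_mux_mask(u32 num_parents, bool index_starts_at_one)
    {
            u32 mask = num_parents;

            if (!index_starts_at_one)
                    mask--;                 /* highest 0-based selector */

            /* widen to the next all-ones mask: 5 parents -> 0x7 */
            return (1 << fls(mask)) - 1;
    }
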
diff --git a/drivers/clk/ux500/clk-prcc.c b/drivers/clk/ux500/clk-prcc.c
index bd4769a84485..0e950769ed03 100644
--- a/drivers/clk/ux500/clk-prcc.c
+++ b/drivers/clk/ux500/clk-prcc.c
@@ -8,7 +8,6 @@
8 */ 8 */
9 9
10#include <linux/clk-provider.h> 10#include <linux/clk-provider.h>
11#include <linux/clk-private.h>
12#include <linux/slab.h> 11#include <linux/slab.h>
13#include <linux/io.h> 12#include <linux/io.h>
14#include <linux/err.h> 13#include <linux/err.h>
diff --git a/drivers/clk/ux500/clk-prcmu.c b/drivers/clk/ux500/clk-prcmu.c
index e2d63bc47436..bf63c96acb1a 100644
--- a/drivers/clk/ux500/clk-prcmu.c
+++ b/drivers/clk/ux500/clk-prcmu.c
@@ -8,7 +8,6 @@
8 */ 8 */
9 9
10#include <linux/clk-provider.h> 10#include <linux/clk-provider.h>
11#include <linux/clk-private.h>
12#include <linux/mfd/dbx500-prcmu.h> 11#include <linux/mfd/dbx500-prcmu.h>
13#include <linux/slab.h> 12#include <linux/slab.h>
14#include <linux/io.h> 13#include <linux/io.h>
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 9037bebd69f7..f870aad57711 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -303,6 +303,7 @@ static void __init zynq_clk_setup(struct device_node *np)
303 clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x], 303 clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x],
304 "cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL, 304 "cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL,
305 26, 0, &armclk_lock); 305 26, 0, &armclk_lock);
306 clk_prepare_enable(clks[cpu_2x]);
306 307
307 clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1, 308 clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1,
308 4 + 2 * tmp); 309 4 + 2 * tmp);
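
The one-line zynq change takes a permanent prepare/enable reference on the cpu_2x gate. The usual motivation for such a change — not spelled out in the hunk itself — is that a clock with no Linux consumer would otherwise be gated by clk_disable_unused() late in boot. A sketch of the pattern, with a hypothetical name:

    #include <linux/clk.h>

    /*
     * Pin a clock that has no Linux consumer but must stay running.
     * The reference is intentionally never dropped.
     */
    static int example_pin_clock(struct clk *clk)
    {
            return clk_prepare_enable(clk);
    }
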
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 1c2506f68122..68161f7a07d6 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -63,6 +63,11 @@ config VT8500_TIMER
63config CADENCE_TTC_TIMER 63config CADENCE_TTC_TIMER
64 bool 64 bool
65 65
66config ASM9260_TIMER
67 bool
68 select CLKSRC_MMIO
69 select CLKSRC_OF
70
66config CLKSRC_NOMADIK_MTU 71config CLKSRC_NOMADIK_MTU
67 bool 72 bool
68 depends on (ARCH_NOMADIK || ARCH_U8500) 73 depends on (ARCH_NOMADIK || ARCH_U8500)
@@ -245,15 +250,4 @@ config CLKSRC_PXA
245 help 250 help
246 This enables OST0 support available on PXA and SA-11x0 251 This enables OST0 support available on PXA and SA-11x0
247 platforms. 252 platforms.
248
249config ASM9260_TIMER
250 bool "Alphascale ASM9260 timer driver"
251 depends on GENERIC_CLOCKEVENTS
252 select CLKSRC_MMIO
253 select CLKSRC_OF
254 default y if MACH_ASM9260
255 help
256 This enables build of a clocksource and clockevent driver for
257 the 32-bit System Timer hardware available on a Alphascale ASM9260.
258
259endmenu 253endmenu
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
index 32a3d25795d3..68ab42356d0e 100644
--- a/drivers/clocksource/mtk_timer.c
+++ b/drivers/clocksource/mtk_timer.c
@@ -224,6 +224,8 @@ static void __init mtk_timer_init(struct device_node *node)
224 } 224 }
225 rate = clk_get_rate(clk); 225 rate = clk_get_rate(clk);
226 226
227 mtk_timer_global_reset(evt);
228
227 if (request_irq(evt->dev.irq, mtk_timer_interrupt, 229 if (request_irq(evt->dev.irq, mtk_timer_interrupt,
228 IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) { 230 IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
229 pr_warn("failed to setup irq %d\n", evt->dev.irq); 231 pr_warn("failed to setup irq %d\n", evt->dev.irq);
@@ -232,8 +234,6 @@ static void __init mtk_timer_init(struct device_node *node)
232 234
233 evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); 235 evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
234 236
235 mtk_timer_global_reset(evt);
236
237 /* Configure clock source */ 237 /* Configure clock source */
238 mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN); 238 mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
239 clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC), 239 clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
@@ -241,10 +241,11 @@ static void __init mtk_timer_init(struct device_node *node)
241 241
242 /* Configure clock event */ 242 /* Configure clock event */
243 mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT); 243 mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
244 mtk_timer_enable_irq(evt, GPT_CLK_EVT);
245
246 clockevents_config_and_register(&evt->dev, rate, 0x3, 244 clockevents_config_and_register(&evt->dev, rate, 0x3,
247 0xffffffff); 245 0xffffffff);
246
247 mtk_timer_enable_irq(evt, GPT_CLK_EVT);
248
248 return; 249 return;
249 250
250err_clk_disable: 251err_clk_disable:
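
The three mtk_timer hunks reorder initialization: the global reset moves ahead of request_irq() so the handler can never observe stale hardware state, and the event interrupt is unmasked only after clockevents_config_and_register() has completed. A compilable sketch of that ordering, with example_* standing in for the driver's device-specific helpers:

    #include <linux/clockchips.h>
    #include <linux/interrupt.h>

    struct example_timer {
            struct clock_event_device dev;
            unsigned long rate;
            int irq;
    };

    static void example_hw_reset(struct example_timer *t) { /* device-specific */ }
    static void example_irq_unmask(struct example_timer *t) { /* device-specific */ }

    static irqreturn_t example_isr(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    static int __init example_timer_init(struct example_timer *t)
    {
            example_hw_reset(t);            /* quiesce before the handler can run */

            if (request_irq(t->irq, example_isr, IRQF_TIMER | IRQF_IRQPOLL,
                            "example_timer", t))
                    return -EBUSY;

            clockevents_config_and_register(&t->dev, t->rate, 0x3, 0xffffffff);

            example_irq_unmask(t);          /* events may fire only now */
            return 0;
    }
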
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index 941f3f344e08..d9438af2bbd6 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -163,7 +163,7 @@ static struct irqaction pxa_ost0_irq = {
163 .dev_id = &ckevt_pxa_osmr0, 163 .dev_id = &ckevt_pxa_osmr0,
164}; 164};
165 165
166static void pxa_timer_common_init(int irq, unsigned long clock_tick_rate) 166static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
167{ 167{
168 timer_writel(0, OIER); 168 timer_writel(0, OIER);
169 timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR); 169 timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
diff --git a/drivers/connector/Kconfig b/drivers/connector/Kconfig
index 6e6730f9dfd1..3de5f3a9a104 100644
--- a/drivers/connector/Kconfig
+++ b/drivers/connector/Kconfig
@@ -12,7 +12,7 @@ menuconfig CONNECTOR
12if CONNECTOR 12if CONNECTOR
13 13
14config PROC_EVENTS 14config PROC_EVENTS
15 boolean "Report process events to userspace" 15 bool "Report process events to userspace"
16 depends on CONNECTOR=y 16 depends on CONNECTOR=y
17 default y 17 default y
18 ---help--- 18 ---help---
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0f9a2c3c0e0d..1b06fc4640e2 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -26,13 +26,21 @@ config ARM_VEXPRESS_SPC_CPUFREQ
26 26
27 27
28config ARM_EXYNOS_CPUFREQ 28config ARM_EXYNOS_CPUFREQ
29 bool 29 tristate "SAMSUNG EXYNOS CPUfreq Driver"
30 depends on CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412 || SOC_EXYNOS5250
31 depends on THERMAL
32 help
33 This adds the CPUFreq driver for Samsung EXYNOS platforms.
34 Supported SoC versions are:
35 Exynos4210, Exynos4212, Exynos4412, and Exynos5250.
36
37 If in doubt, say N.
30 38
31config ARM_EXYNOS4210_CPUFREQ 39config ARM_EXYNOS4210_CPUFREQ
32 bool "SAMSUNG EXYNOS4210" 40 bool "SAMSUNG EXYNOS4210"
33 depends on CPU_EXYNOS4210 41 depends on CPU_EXYNOS4210
42 depends on ARM_EXYNOS_CPUFREQ
34 default y 43 default y
35 select ARM_EXYNOS_CPUFREQ
36 help 44 help
37 This adds the CPUFreq driver for Samsung EXYNOS4210 45 This adds the CPUFreq driver for Samsung EXYNOS4210
38 SoC (S5PV310 or S5PC210). 46 SoC (S5PV310 or S5PC210).
@@ -42,8 +50,8 @@ config ARM_EXYNOS4210_CPUFREQ
42config ARM_EXYNOS4X12_CPUFREQ 50config ARM_EXYNOS4X12_CPUFREQ
43 bool "SAMSUNG EXYNOS4x12" 51 bool "SAMSUNG EXYNOS4x12"
44 depends on SOC_EXYNOS4212 || SOC_EXYNOS4412 52 depends on SOC_EXYNOS4212 || SOC_EXYNOS4412
53 depends on ARM_EXYNOS_CPUFREQ
45 default y 54 default y
46 select ARM_EXYNOS_CPUFREQ
47 help 55 help
48 This adds the CPUFreq driver for Samsung EXYNOS4X12 56 This adds the CPUFreq driver for Samsung EXYNOS4X12
49 SoC (EXYNOS4212 or EXYNOS4412). 57 SoC (EXYNOS4212 or EXYNOS4412).
@@ -53,28 +61,14 @@ config ARM_EXYNOS4X12_CPUFREQ
53config ARM_EXYNOS5250_CPUFREQ 61config ARM_EXYNOS5250_CPUFREQ
54 bool "SAMSUNG EXYNOS5250" 62 bool "SAMSUNG EXYNOS5250"
55 depends on SOC_EXYNOS5250 63 depends on SOC_EXYNOS5250
64 depends on ARM_EXYNOS_CPUFREQ
56 default y 65 default y
57 select ARM_EXYNOS_CPUFREQ
58 help 66 help
59 This adds the CPUFreq driver for Samsung EXYNOS5250 67 This adds the CPUFreq driver for Samsung EXYNOS5250
60 SoC. 68 SoC.
61 69
62 If in doubt, say N. 70 If in doubt, say N.
63 71
64config ARM_EXYNOS5440_CPUFREQ
65 bool "SAMSUNG EXYNOS5440"
66 depends on SOC_EXYNOS5440
67 depends on HAVE_CLK && OF
68 select PM_OPP
69 default y
70 help
71 This adds the CPUFreq driver for Samsung EXYNOS5440
72 SoC. The nature of exynos5440 clock controller is
73 different than previous exynos controllers so not using
74 the common exynos framework.
75
76 If in doubt, say N.
77
78config ARM_EXYNOS_CPU_FREQ_BOOST_SW 72config ARM_EXYNOS_CPU_FREQ_BOOST_SW
79 bool "EXYNOS Frequency Overclocking - Software" 73 bool "EXYNOS Frequency Overclocking - Software"
80 depends on ARM_EXYNOS_CPUFREQ && THERMAL 74 depends on ARM_EXYNOS_CPUFREQ && THERMAL
@@ -90,6 +84,20 @@ config ARM_EXYNOS_CPU_FREQ_BOOST_SW
90 84
91 If in doubt, say N. 85 If in doubt, say N.
92 86
87config ARM_EXYNOS5440_CPUFREQ
88 tristate "SAMSUNG EXYNOS5440"
89 depends on SOC_EXYNOS5440
90 depends on HAVE_CLK && OF
91 select PM_OPP
92 default y
93 help
94 This adds the CPUFreq driver for Samsung EXYNOS5440
95 SoC. The nature of exynos5440 clock controller is
96 different than previous exynos controllers so not using
97 the common exynos framework.
98
99 If in doubt, say N.
100
93config ARM_HIGHBANK_CPUFREQ 101config ARM_HIGHBANK_CPUFREQ
94 tristate "Calxeda Highbank-based" 102 tristate "Calxeda Highbank-based"
95 depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR 103 depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index 72564b701b4a..7ea24413cee6 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -26,7 +26,7 @@ config CPU_FREQ_MAPLE
26config PPC_CORENET_CPUFREQ 26config PPC_CORENET_CPUFREQ
27 tristate "CPU frequency scaling driver for Freescale E500MC SoCs" 27 tristate "CPU frequency scaling driver for Freescale E500MC SoCs"
28 depends on PPC_E500MC && OF && COMMON_CLK 28 depends on PPC_E500MC && OF && COMMON_CLK
29 select CLK_PPC_CORENET 29 select CLK_QORIQ
30 help 30 help
31 This adds the CPUFreq driver support for Freescale e500mc, 31 This adds the CPUFreq driver support for Freescale e500mc,
32 e5500 and e6500 series SoCs which are capable of changing 32 e5500 and e6500 series SoCs which are capable of changing
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 8b4220ac888b..82a1821471fd 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -52,10 +52,11 @@ obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
52 52
53obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o 53obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
54obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o 54obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
55obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o 55obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += arm-exynos-cpufreq.o
56obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o 56arm-exynos-cpufreq-y := exynos-cpufreq.o
57obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o 57arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
58obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o 58arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o
59arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
59obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o 60obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
60obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o 61obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
61obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o 62obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index f99a0b0b7c06..5e98c6b1f284 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -18,10 +18,13 @@
18#include <linux/cpufreq.h> 18#include <linux/cpufreq.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/of.h> 20#include <linux/of.h>
21#include <linux/cpu_cooling.h>
22#include <linux/cpu.h>
21 23
22#include "exynos-cpufreq.h" 24#include "exynos-cpufreq.h"
23 25
24static struct exynos_dvfs_info *exynos_info; 26static struct exynos_dvfs_info *exynos_info;
27static struct thermal_cooling_device *cdev;
25static struct regulator *arm_regulator; 28static struct regulator *arm_regulator;
26static unsigned int locking_frequency; 29static unsigned int locking_frequency;
27 30
@@ -156,6 +159,7 @@ static struct cpufreq_driver exynos_driver = {
156 159
157static int exynos_cpufreq_probe(struct platform_device *pdev) 160static int exynos_cpufreq_probe(struct platform_device *pdev)
158{ 161{
162 struct device_node *cpus, *np;
159 int ret = -EINVAL; 163 int ret = -EINVAL;
160 164
161 exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL); 165 exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
@@ -198,9 +202,36 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
198 /* Done here as we want to capture boot frequency */ 202 /* Done here as we want to capture boot frequency */
199 locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000; 203 locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000;
200 204
201 if (!cpufreq_register_driver(&exynos_driver)) 205 ret = cpufreq_register_driver(&exynos_driver);
206 if (ret)
207 goto err_cpufreq_reg;
208
209 cpus = of_find_node_by_path("/cpus");
210 if (!cpus) {
211 pr_err("failed to find cpus node\n");
212 return 0;
213 }
214
215 np = of_get_next_child(cpus, NULL);
216 if (!np) {
217 pr_err("failed to find cpus child node\n");
218 of_node_put(cpus);
202 return 0; 219 return 0;
220 }
221
222 if (of_find_property(np, "#cooling-cells", NULL)) {
223 cdev = of_cpufreq_cooling_register(np,
224 cpu_present_mask);
225 if (IS_ERR(cdev))
226 pr_err("running cpufreq without cooling device: %ld\n",
227 PTR_ERR(cdev));
228 }
229 of_node_put(np);
230 of_node_put(cpus);
231
232 return 0;
203 233
234err_cpufreq_reg:
204 dev_err(&pdev->dev, "failed to register cpufreq driver\n"); 235 dev_err(&pdev->dev, "failed to register cpufreq driver\n");
205 regulator_put(arm_regulator); 236 regulator_put(arm_regulator);
206err_vdd_arm: 237err_vdd_arm:
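
The exynos-cpufreq hunk registers a cpufreq-based cooling device after the driver itself is up, keyed on the first child of /cpus advertising #cooling-cells. A condensed sketch of that hookup — assuming, as the patch does, that the first CPU node carries the property:

    #include <linux/cpu_cooling.h>
    #include <linux/cpumask.h>
    #include <linux/of.h>

    static struct thermal_cooling_device *example_register_cooling(void)
    {
            struct device_node *cpus, *np;
            struct thermal_cooling_device *cdev = NULL;

            cpus = of_find_node_by_path("/cpus");
            if (!cpus)
                    return NULL;

            np = of_get_next_child(cpus, NULL);
            if (np && of_find_property(np, "#cooling-cells", NULL))
                    cdev = of_cpufreq_cooling_register(np, cpu_present_mask);

            of_node_put(np);
            of_node_put(cpus);
            return cdev;    /* may be an ERR_PTR; callers should IS_ERR() it */
    }

Note the driver deliberately only warns when cooling registration fails: cpufreq keeps working without the thermal tie-in.
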
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index 2fd53eaaec20..d6d425773fa4 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -263,7 +263,7 @@ out:
263} 263}
264 264
265#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE 265#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
266static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq) 266static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
267{ 267{
268 int count, v, i, found; 268 int count, v, i, found;
269 struct cpufreq_frequency_table *pos; 269 struct cpufreq_frequency_table *pos;
@@ -333,7 +333,7 @@ static struct notifier_block s3c2416_cpufreq_reboot_notifier = {
333 .notifier_call = s3c2416_cpufreq_reboot_notifier_evt, 333 .notifier_call = s3c2416_cpufreq_reboot_notifier_evt,
334}; 334};
335 335
336static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) 336static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
337{ 337{
338 struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; 338 struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
339 struct cpufreq_frequency_table *pos; 339 struct cpufreq_frequency_table *pos;
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index d00f1cee4509..733aa5153e74 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -144,11 +144,6 @@ static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg)
144 (cfg->info->set_fvco)(cfg); 144 (cfg->info->set_fvco)(cfg);
145} 145}
146 146
147static inline void s3c_cpufreq_resume_clocks(void)
148{
149 cpu_cur.info->resume_clocks();
150}
151
152static inline void s3c_cpufreq_updateclk(struct clk *clk, 147static inline void s3c_cpufreq_updateclk(struct clk *clk,
153 unsigned int freq) 148 unsigned int freq)
154{ 149{
@@ -417,9 +412,6 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
417 412
418 last_target = ~0; /* invalidate last_target setting */ 413 last_target = ~0; /* invalidate last_target setting */
419 414
420 /* first, find out what speed we resumed at. */
421 s3c_cpufreq_resume_clocks();
422
423 /* whilst we will be called later on, we try and re-set the 415 /* whilst we will be called later on, we try and re-set the
424 * cpu frequencies as soon as possible so that we do not end 416 * cpu frequencies as soon as possible so that we do not end
425 * up resuming devices and then immediately having to re-set 417 * up resuming devices and then immediately having to re-set
@@ -454,7 +446,7 @@ static struct cpufreq_driver s3c24xx_driver = {
454}; 446};
455 447
456 448
457int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info) 449int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
458{ 450{
459 if (!info || !info->name) { 451 if (!info || !info->name) {
460 printk(KERN_ERR "%s: failed to pass valid information\n", 452 printk(KERN_ERR "%s: failed to pass valid information\n",
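
The s3c2416 and s3c24xx hunks drop __init from functions that remain reachable after boot (cpufreq callbacks and the registration entry point). Once init memory is released, a call into an __init function jumps into freed text, and now that the surrounding code can live in a module or late path the annotation is no longer safe. A minimal illustration of the distinction, with hypothetical names:

    #include <linux/init.h>

    /* Discarded after boot: must only be reachable during early init. */
    static int __init example_setup_early(void) { return 0; }

    /* Anything a driver callback can invoke at run time stays resident. */
    static int example_setup_runtime(void) { return 0; }
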
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index aedec0957934..59372077ec7c 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -13,6 +13,7 @@
13#include <linux/notifier.h> 13#include <linux/notifier.h>
14#include <linux/clockchips.h> 14#include <linux/clockchips.h>
15#include <linux/of.h> 15#include <linux/of.h>
16#include <linux/slab.h>
16 17
17#include <asm/machdep.h> 18#include <asm/machdep.h>
18#include <asm/firmware.h> 19#include <asm/firmware.h>
@@ -158,70 +159,83 @@ static int powernv_add_idle_states(void)
158 struct device_node *power_mgt; 159 struct device_node *power_mgt;
159 int nr_idle_states = 1; /* Snooze */ 160 int nr_idle_states = 1; /* Snooze */
160 int dt_idle_states; 161 int dt_idle_states;
161 const __be32 *idle_state_flags; 162 u32 *latency_ns, *residency_ns, *flags;
162 const __be32 *idle_state_latency; 163 int i, rc;
163 u32 len_flags, flags, latency_ns;
164 int i;
165 164
166 /* Currently we have snooze statically defined */ 165 /* Currently we have snooze statically defined */
167 166
168 power_mgt = of_find_node_by_path("/ibm,opal/power-mgt"); 167 power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
169 if (!power_mgt) { 168 if (!power_mgt) {
170 pr_warn("opal: PowerMgmt Node not found\n"); 169 pr_warn("opal: PowerMgmt Node not found\n");
171 return nr_idle_states; 170 goto out;
172 } 171 }
173 172
174 idle_state_flags = of_get_property(power_mgt, "ibm,cpu-idle-state-flags", &len_flags); 173 /* Read values of any property to determine the num of idle states */
175 if (!idle_state_flags) { 174 dt_idle_states = of_property_count_u32_elems(power_mgt, "ibm,cpu-idle-state-flags");
176 pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n"); 175 if (dt_idle_states < 0) {
177 return nr_idle_states; 176 pr_warn("cpuidle-powernv: no idle states found in the DT\n");
177 goto out;
178 } 178 }
179 179
180 idle_state_latency = of_get_property(power_mgt, 180 flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
181 "ibm,cpu-idle-state-latencies-ns", NULL); 181 if (of_property_read_u32_array(power_mgt,
182 if (!idle_state_latency) { 182 "ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
183 pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-latencies-ns\n"); 183 pr_warn("cpuidle-powernv : missing ibm,cpu-idle-state-flags in DT\n");
184 return nr_idle_states; 184 goto out_free_flags;
185 } 185 }
186 186
187 dt_idle_states = len_flags / sizeof(u32); 187 latency_ns = kzalloc(sizeof(*latency_ns) * dt_idle_states, GFP_KERNEL);
188 rc = of_property_read_u32_array(power_mgt,
189 "ibm,cpu-idle-state-latencies-ns", latency_ns, dt_idle_states);
190 if (rc) {
191 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
192 goto out_free_latency;
193 }
188 194
189 for (i = 0; i < dt_idle_states; i++) { 195 residency_ns = kzalloc(sizeof(*residency_ns) * dt_idle_states, GFP_KERNEL);
196 rc = of_property_read_u32_array(power_mgt,
197 "ibm,cpu-idle-state-residency-ns", residency_ns, dt_idle_states);
190 198
191 flags = be32_to_cpu(idle_state_flags[i]); 199 for (i = 0; i < dt_idle_states; i++) {
192 200
193 /* Cpuidle accepts exit_latency in us and we estimate 201 /*
194 * target residency to be 10x exit_latency 202 * Cpuidle accepts exit_latency and target_residency in us.
203 * Use default target_residency values if f/w does not expose it.
195 */ 204 */
196 latency_ns = be32_to_cpu(idle_state_latency[i]); 205 if (flags[i] & OPAL_PM_NAP_ENABLED) {
197 if (flags & OPAL_PM_NAP_ENABLED) {
198 /* Add NAP state */ 206 /* Add NAP state */
199 strcpy(powernv_states[nr_idle_states].name, "Nap"); 207 strcpy(powernv_states[nr_idle_states].name, "Nap");
200 strcpy(powernv_states[nr_idle_states].desc, "Nap"); 208 strcpy(powernv_states[nr_idle_states].desc, "Nap");
201 powernv_states[nr_idle_states].flags = 0; 209 powernv_states[nr_idle_states].flags = 0;
202 powernv_states[nr_idle_states].exit_latency = 210 powernv_states[nr_idle_states].target_residency = 100;
203 ((unsigned int)latency_ns) / 1000;
204 powernv_states[nr_idle_states].target_residency =
205 ((unsigned int)latency_ns / 100);
206 powernv_states[nr_idle_states].enter = &nap_loop; 211 powernv_states[nr_idle_states].enter = &nap_loop;
207 nr_idle_states++; 212 } else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
208 } 213 flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
209
210 if (flags & OPAL_PM_SLEEP_ENABLED ||
211 flags & OPAL_PM_SLEEP_ENABLED_ER1) {
212 /* Add FASTSLEEP state */ 214 /* Add FASTSLEEP state */
213 strcpy(powernv_states[nr_idle_states].name, "FastSleep"); 215 strcpy(powernv_states[nr_idle_states].name, "FastSleep");
214 strcpy(powernv_states[nr_idle_states].desc, "FastSleep"); 216 strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
215 powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP; 217 powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP;
216 powernv_states[nr_idle_states].exit_latency = 218 powernv_states[nr_idle_states].target_residency = 300000;
217 ((unsigned int)latency_ns) / 1000;
218 powernv_states[nr_idle_states].target_residency =
219 ((unsigned int)latency_ns / 100);
220 powernv_states[nr_idle_states].enter = &fastsleep_loop; 219 powernv_states[nr_idle_states].enter = &fastsleep_loop;
221 nr_idle_states++;
222 } 220 }
221
222 powernv_states[nr_idle_states].exit_latency =
223 ((unsigned int)latency_ns[i]) / 1000;
224
225 if (!rc) {
226 powernv_states[nr_idle_states].target_residency =
227 ((unsigned int)residency_ns[i]) / 1000;
228 }
229
230 nr_idle_states++;
223 } 231 }
224 232
233 kfree(residency_ns);
234out_free_latency:
235 kfree(latency_ns);
236out_free_flags:
237 kfree(flags);
238out:
225 return nr_idle_states; 239 return nr_idle_states;
226} 240}
227 241
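
The cpuidle-powernv rework above replaces raw of_get_property()/be32_to_cpu() parsing with the counted-array helpers: count the elements of one property, then read each parallel array with of_property_read_u32_array(), which also handles endianness. A sketch of that pattern — the property name is an example, not the OPAL one:

    #include <linux/of.h>
    #include <linux/slab.h>

    static int example_read_array(struct device_node *np)
    {
            u32 *flags;
            int n;

            n = of_property_count_u32_elems(np, "example,flags");
            if (n < 0)
                    return n;

            flags = kcalloc(n, sizeof(*flags), GFP_KERNEL);
            if (!flags)
                    return -ENOMEM;

            if (of_property_read_u32_array(np, "example,flags", flags, n)) {
                    kfree(flags);
                    return -ENODEV;
            }

            /* parallel arrays (latencies, residencies) read the same way */
            kfree(flags);
            return n;
    }
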
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index d594ae962ed2..fded0a5cfcd7 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -606,12 +606,12 @@ static void cryp_dma_done(struct cryp_ctx *ctx)
606 dev_dbg(ctx->device->dev, "[%s]: ", __func__); 606 dev_dbg(ctx->device->dev, "[%s]: ", __func__);
607 607
608 chan = ctx->device->dma.chan_mem2cryp; 608 chan = ctx->device->dma.chan_mem2cryp;
609 dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); 609 dmaengine_terminate_all(chan);
610 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src, 610 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
611 ctx->device->dma.sg_src_len, DMA_TO_DEVICE); 611 ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
612 612
613 chan = ctx->device->dma.chan_cryp2mem; 613 chan = ctx->device->dma.chan_cryp2mem;
614 dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); 614 dmaengine_terminate_all(chan);
615 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst, 615 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
616 ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE); 616 ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
617} 617}
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 70a20871e998..187a8fd7eee7 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -202,7 +202,7 @@ static void hash_dma_done(struct hash_ctx *ctx)
202 struct dma_chan *chan; 202 struct dma_chan *chan;
203 203
204 chan = ctx->device->dma.chan_mem2hash; 204 chan = ctx->device->dma.chan_mem2hash;
205 dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); 205 dmaengine_terminate_all(chan);
206 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg, 206 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
207 ctx->device->dma.sg_len, DMA_TO_DEVICE); 207 ctx->device->dma.sg_len, DMA_TO_DEVICE);
208} 208}
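
Both ux500 crypto hunks swap the raw dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0) call for the dmaengine_terminate_all() helper, so consumers no longer touch the command multiplexer directly. The whole consumer-side change reduces to:

    #include <linux/dmaengine.h>

    static void example_stop_dma(struct dma_chan *chan)
    {
            dmaengine_terminate_all(chan);  /* abort all pending descriptors */
    }
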
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index faf30a4e642b..a874b6ec6650 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -416,6 +416,15 @@ config NBPFAXI_DMA
416 help 416 help
417 Support for "Type-AXI" NBPF DMA IPs from Renesas 417 Support for "Type-AXI" NBPF DMA IPs from Renesas
418 418
419config IMG_MDC_DMA
420 tristate "IMG MDC support"
421 depends on MIPS || COMPILE_TEST
422 depends on MFD_SYSCON
423 select DMA_ENGINE
424 select DMA_VIRTUAL_CHANNELS
425 help
426 Enable support for the IMG multi-threaded DMA controller (MDC).
427
419config DMA_ENGINE 428config DMA_ENGINE
420 bool 429 bool
421 430
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 2022b5451377..f915f61ec574 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -19,7 +19,7 @@ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
19obj-$(CONFIG_AT_XDMAC) += at_xdmac.o 19obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
20obj-$(CONFIG_MX3_IPU) += ipu/ 20obj-$(CONFIG_MX3_IPU) += ipu/
21obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o 21obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
22obj-$(CONFIG_SH_DMAE_BASE) += sh/ 22obj-$(CONFIG_RENESAS_DMA) += sh/
23obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o 23obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
24obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ 24obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
25obj-$(CONFIG_IMX_SDMA) += imx-sdma.o 25obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
@@ -50,3 +50,4 @@ obj-y += xilinx/
50obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o 50obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
51obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o 51obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
52obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o 52obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
53obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 1364d00881dd..4a5fd245014e 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1386,32 +1386,6 @@ static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
1386 return pl08x_cctl(cctl); 1386 return pl08x_cctl(cctl);
1387} 1387}
1388 1388
1389static int dma_set_runtime_config(struct dma_chan *chan,
1390 struct dma_slave_config *config)
1391{
1392 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1393 struct pl08x_driver_data *pl08x = plchan->host;
1394
1395 if (!plchan->slave)
1396 return -EINVAL;
1397
1398 /* Reject definitely invalid configurations */
1399 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
1400 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
1401 return -EINVAL;
1402
1403 if (config->device_fc && pl08x->vd->pl080s) {
1404 dev_err(&pl08x->adev->dev,
1405 "%s: PL080S does not support peripheral flow control\n",
1406 __func__);
1407 return -EINVAL;
1408 }
1409
1410 plchan->cfg = *config;
1411
1412 return 0;
1413}
1414
1415/* 1389/*
1416 * Slave transactions callback to the slave device to allow 1390 * Slave transactions callback to the slave device to allow
1417 * synchronization of slave DMA signals with the DMAC enable 1391 * synchronization of slave DMA signals with the DMAC enable
@@ -1693,20 +1667,71 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
1693 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 1667 return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
1694} 1668}
1695 1669
1696static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1670static int pl08x_config(struct dma_chan *chan,
1697 unsigned long arg) 1671 struct dma_slave_config *config)
1672{
1673 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1674 struct pl08x_driver_data *pl08x = plchan->host;
1675
1676 if (!plchan->slave)
1677 return -EINVAL;
1678
1679 /* Reject definitely invalid configurations */
1680 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
1681 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
1682 return -EINVAL;
1683
1684 if (config->device_fc && pl08x->vd->pl080s) {
1685 dev_err(&pl08x->adev->dev,
1686 "%s: PL080S does not support peripheral flow control\n",
1687 __func__);
1688 return -EINVAL;
1689 }
1690
1691 plchan->cfg = *config;
1692
1693 return 0;
1694}
1695
1696static int pl08x_terminate_all(struct dma_chan *chan)
1698{ 1697{
1699 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1698 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1700 struct pl08x_driver_data *pl08x = plchan->host; 1699 struct pl08x_driver_data *pl08x = plchan->host;
1701 unsigned long flags; 1700 unsigned long flags;
1702 int ret = 0;
1703 1701
1704 /* Controls applicable to inactive channels */ 1702 spin_lock_irqsave(&plchan->vc.lock, flags);
1705 if (cmd == DMA_SLAVE_CONFIG) { 1703 if (!plchan->phychan && !plchan->at) {
1706 return dma_set_runtime_config(chan, 1704 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1707 (struct dma_slave_config *)arg); 1705 return 0;
1708 } 1706 }
1709 1707
1708 plchan->state = PL08X_CHAN_IDLE;
1709
1710 if (plchan->phychan) {
1711 /*
1712 * Mark physical channel as free and free any slave
1713 * signal
1714 */
1715 pl08x_phy_free(plchan);
1716 }
1717 /* Dequeue jobs and free LLIs */
1718 if (plchan->at) {
1719 pl08x_desc_free(&plchan->at->vd);
1720 plchan->at = NULL;
1721 }
1722 /* Dequeue jobs not yet fired as well */
1723 pl08x_free_txd_list(pl08x, plchan);
1724
1725 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1726
1727 return 0;
1728}
1729
1730static int pl08x_pause(struct dma_chan *chan)
1731{
1732 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1733 unsigned long flags;
1734
1710 /* 1735 /*
1711 * Anything succeeds on channels with no physical allocation and 1736 * Anything succeeds on channels with no physical allocation and
1712 * no queued transfers. 1737 * no queued transfers.
@@ -1717,42 +1742,35 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1717 return 0; 1742 return 0;
1718 } 1743 }
1719 1744
1720 switch (cmd) { 1745 pl08x_pause_phy_chan(plchan->phychan);
1721 case DMA_TERMINATE_ALL: 1746 plchan->state = PL08X_CHAN_PAUSED;
1722 plchan->state = PL08X_CHAN_IDLE;
1723 1747
1724 if (plchan->phychan) { 1748 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1725 /* 1749
1726 * Mark physical channel as free and free any slave 1750 return 0;
1727 * signal 1751}
1728 */ 1752
1729 pl08x_phy_free(plchan); 1753static int pl08x_resume(struct dma_chan *chan)
1730 } 1754{
1731 /* Dequeue jobs and free LLIs */ 1755 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1732 if (plchan->at) { 1756 unsigned long flags;
1733 pl08x_desc_free(&plchan->at->vd); 1757
1734 plchan->at = NULL; 1758 /*
1735 } 1759 * Anything succeeds on channels with no physical allocation and
1736 /* Dequeue jobs not yet fired as well */ 1760 * no queued transfers.
1737 pl08x_free_txd_list(pl08x, plchan); 1761 */
1738 break; 1762 spin_lock_irqsave(&plchan->vc.lock, flags);
1739 case DMA_PAUSE: 1763 if (!plchan->phychan && !plchan->at) {
1740 pl08x_pause_phy_chan(plchan->phychan); 1764 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1741 plchan->state = PL08X_CHAN_PAUSED; 1765 return 0;
1742 break;
1743 case DMA_RESUME:
1744 pl08x_resume_phy_chan(plchan->phychan);
1745 plchan->state = PL08X_CHAN_RUNNING;
1746 break;
1747 default:
1748 /* Unknown command */
1749 ret = -ENXIO;
1750 break;
1751 } 1766 }
1752 1767
1768 pl08x_resume_phy_chan(plchan->phychan);
1769 plchan->state = PL08X_CHAN_RUNNING;
1770
1753 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1771 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1754 1772
1755 return ret; 1773 return 0;
1756} 1774}
1757 1775
1758bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) 1776bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
@@ -2048,7 +2066,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2048 pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; 2066 pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
2049 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status; 2067 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
2050 pl08x->memcpy.device_issue_pending = pl08x_issue_pending; 2068 pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
2051 pl08x->memcpy.device_control = pl08x_control; 2069 pl08x->memcpy.device_config = pl08x_config;
2070 pl08x->memcpy.device_pause = pl08x_pause;
2071 pl08x->memcpy.device_resume = pl08x_resume;
2072 pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
2052 2073
2053 /* Initialize slave engine */ 2074 /* Initialize slave engine */
2054 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); 2075 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
@@ -2061,7 +2082,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2061 pl08x->slave.device_issue_pending = pl08x_issue_pending; 2082 pl08x->slave.device_issue_pending = pl08x_issue_pending;
2062 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; 2083 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
2063 pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic; 2084 pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
2064 pl08x->slave.device_control = pl08x_control; 2085 pl08x->slave.device_config = pl08x_config;
2086 pl08x->slave.device_pause = pl08x_pause;
2087 pl08x->slave.device_resume = pl08x_resume;
2088 pl08x->slave.device_terminate_all = pl08x_terminate_all;
2065 2089
2066 /* Get the platform data */ 2090 /* Get the platform data */
2067 pl08x->pd = dev_get_platdata(&adev->dev); 2091 pl08x->pd = dev_get_platdata(&adev->dev);
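
The pl08x conversion is the provider-side half of the same dmaengine rework: the single device_control() switch statement is split into dedicated device_config/device_pause/device_resume/device_terminate_all callbacks wired up at probe time. A skeletal sketch of the wiring, with example_* placeholders for the driver's real implementations:

    #include <linux/dmaengine.h>

    static int example_config(struct dma_chan *c, struct dma_slave_config *cfg)
    { return 0; }
    static int example_pause(struct dma_chan *c) { return 0; }
    static int example_resume(struct dma_chan *c) { return 0; }
    static int example_terminate_all(struct dma_chan *c) { return 0; }

    static void example_wire_up(struct dma_device *dd)
    {
            dd->device_config = example_config;
            dd->device_pause = example_pause;
            dd->device_resume = example_resume;
            dd->device_terminate_all = example_terminate_all;
    }

One payoff visible in the diff: atc_suspend_cyclic() can now call atc_pause(chan) directly instead of round-tripping through atc_control(chan, DMA_PAUSE, 0).
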
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index ca9dd2613283..1e1a4c567542 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -42,6 +42,11 @@
42#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) 42#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
43#define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \ 43#define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \
44 |ATC_DIF(AT_DMA_MEM_IF)) 44 |ATC_DIF(AT_DMA_MEM_IF))
45#define ATC_DMA_BUSWIDTHS\
46 (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
47 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
48 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
49 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
45 50
46/* 51/*
47 * Initial number of descriptors to allocate for each channel. This could 52 * Initial number of descriptors to allocate for each channel. This could
@@ -972,11 +977,13 @@ err_out:
972 return NULL; 977 return NULL;
973} 978}
974 979
975static int set_runtime_config(struct dma_chan *chan, 980static int atc_config(struct dma_chan *chan,
976 struct dma_slave_config *sconfig) 981 struct dma_slave_config *sconfig)
977{ 982{
978 struct at_dma_chan *atchan = to_at_dma_chan(chan); 983 struct at_dma_chan *atchan = to_at_dma_chan(chan);
979 984
985 dev_vdbg(chan2dev(chan), "%s\n", __func__);
986
980 /* Check if it is chan is configured for slave transfers */ 987 /* Check if it is chan is configured for slave transfers */
981 if (!chan->private) 988 if (!chan->private)
982 return -EINVAL; 989 return -EINVAL;
@@ -989,9 +996,28 @@ static int set_runtime_config(struct dma_chan *chan,
989 return 0; 996 return 0;
990} 997}
991 998
999static int atc_pause(struct dma_chan *chan)
1000{
1001 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1002 struct at_dma *atdma = to_at_dma(chan->device);
1003 int chan_id = atchan->chan_common.chan_id;
1004 unsigned long flags;
992 1005
993static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1006 LIST_HEAD(list);
994 unsigned long arg) 1007
1008 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1009
1010 spin_lock_irqsave(&atchan->lock, flags);
1011
1012 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
1013 set_bit(ATC_IS_PAUSED, &atchan->status);
1014
1015 spin_unlock_irqrestore(&atchan->lock, flags);
1016
1017 return 0;
1018}
1019
1020static int atc_resume(struct dma_chan *chan)
995{ 1021{
996 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1022 struct at_dma_chan *atchan = to_at_dma_chan(chan);
997 struct at_dma *atdma = to_at_dma(chan->device); 1023 struct at_dma *atdma = to_at_dma(chan->device);
@@ -1000,60 +1026,61 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1000 1026
1001 LIST_HEAD(list); 1027 LIST_HEAD(list);
1002 1028
1003 dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd); 1029 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1004 1030
1005 if (cmd == DMA_PAUSE) { 1031 if (!atc_chan_is_paused(atchan))
1006 spin_lock_irqsave(&atchan->lock, flags); 1032 return 0;
1007 1033
1008 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); 1034 spin_lock_irqsave(&atchan->lock, flags);
1009 set_bit(ATC_IS_PAUSED, &atchan->status);
1010 1035
1011 spin_unlock_irqrestore(&atchan->lock, flags); 1036 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
1012 } else if (cmd == DMA_RESUME) { 1037 clear_bit(ATC_IS_PAUSED, &atchan->status);
1013 if (!atc_chan_is_paused(atchan))
1014 return 0;
1015 1038
1016 spin_lock_irqsave(&atchan->lock, flags); 1039 spin_unlock_irqrestore(&atchan->lock, flags);
1017 1040
1018 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); 1041 return 0;
1019 clear_bit(ATC_IS_PAUSED, &atchan->status); 1042}
1020 1043
1021 spin_unlock_irqrestore(&atchan->lock, flags); 1044static int atc_terminate_all(struct dma_chan *chan)
1022 } else if (cmd == DMA_TERMINATE_ALL) { 1045{
1023 struct at_desc *desc, *_desc; 1046 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1024 /* 1047 struct at_dma *atdma = to_at_dma(chan->device);
1025 * This is only called when something went wrong elsewhere, so 1048 int chan_id = atchan->chan_common.chan_id;
1026 * we don't really care about the data. Just disable the 1049 struct at_desc *desc, *_desc;
1027 * channel. We still have to poll the channel enable bit due 1050 unsigned long flags;
1028 * to AHB/HSB limitations.
1029 */
1030 spin_lock_irqsave(&atchan->lock, flags);
1031 1051
1032 /* disabling channel: must also remove suspend state */ 1052 LIST_HEAD(list);
1033 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
1034 1053
1035 /* confirm that this channel is disabled */ 1054 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1036 while (dma_readl(atdma, CHSR) & atchan->mask)
1037 cpu_relax();
1038 1055
1039 /* active_list entries will end up before queued entries */ 1056 /*
1040 list_splice_init(&atchan->queue, &list); 1057 * This is only called when something went wrong elsewhere, so
1041 list_splice_init(&atchan->active_list, &list); 1058 * we don't really care about the data. Just disable the
1059 * channel. We still have to poll the channel enable bit due
1060 * to AHB/HSB limitations.
1061 */
1062 spin_lock_irqsave(&atchan->lock, flags);
1042 1063
1043 /* Flush all pending and queued descriptors */ 1064 /* disabling channel: must also remove suspend state */
1044 list_for_each_entry_safe(desc, _desc, &list, desc_node) 1065 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
1045 atc_chain_complete(atchan, desc);
1046 1066
1047 clear_bit(ATC_IS_PAUSED, &atchan->status); 1067 /* confirm that this channel is disabled */
1048 /* if channel dedicated to cyclic operations, free it */ 1068 while (dma_readl(atdma, CHSR) & atchan->mask)
1049 clear_bit(ATC_IS_CYCLIC, &atchan->status); 1069 cpu_relax();
1050 1070
1051 spin_unlock_irqrestore(&atchan->lock, flags); 1071 /* active_list entries will end up before queued entries */
1052 } else if (cmd == DMA_SLAVE_CONFIG) { 1072 list_splice_init(&atchan->queue, &list);
1053 return set_runtime_config(chan, (struct dma_slave_config *)arg); 1073 list_splice_init(&atchan->active_list, &list);
1054 } else { 1074
1055 return -ENXIO; 1075 /* Flush all pending and queued descriptors */
1056 } 1076 list_for_each_entry_safe(desc, _desc, &list, desc_node)
1077 atc_chain_complete(atchan, desc);
1078
1079 clear_bit(ATC_IS_PAUSED, &atchan->status);
1080 /* if channel dedicated to cyclic operations, free it */
1081 clear_bit(ATC_IS_CYCLIC, &atchan->status);
1082
1083 spin_unlock_irqrestore(&atchan->lock, flags);
1057 1084
1058 return 0; 1085 return 0;
1059} 1086}
@@ -1505,7 +1532,14 @@ static int __init at_dma_probe(struct platform_device *pdev)
1505 /* controller can do slave DMA: can trigger cyclic transfers */ 1532 /* controller can do slave DMA: can trigger cyclic transfers */
1506 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask); 1533 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
1507 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; 1534 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
1508 atdma->dma_common.device_control = atc_control; 1535 atdma->dma_common.device_config = atc_config;
1536 atdma->dma_common.device_pause = atc_pause;
1537 atdma->dma_common.device_resume = atc_resume;
1538 atdma->dma_common.device_terminate_all = atc_terminate_all;
1539 atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
1540 atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
1541 atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1542 atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1509 } 1543 }
1510 1544
1511 dma_writel(atdma, EN, AT_DMA_ENABLE); 1545 dma_writel(atdma, EN, AT_DMA_ENABLE);
@@ -1622,7 +1656,7 @@ static void atc_suspend_cyclic(struct at_dma_chan *atchan)
1622 if (!atc_chan_is_paused(atchan)) { 1656 if (!atc_chan_is_paused(atchan)) {
1623 dev_warn(chan2dev(chan), 1657 dev_warn(chan2dev(chan),
1624 "cyclic channel not paused, should be done by channel user\n"); 1658 "cyclic channel not paused, should be done by channel user\n");
1625 atc_control(chan, DMA_PAUSE, 0); 1659 atc_pause(chan);
1626 } 1660 }
1627 1661
1628 /* now preserve additional data for cyclic operations */ 1662 /* now preserve additional data for cyclic operations */
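
Alongside the callback split, at_hdmac starts advertising its slave capabilities on the dma_device itself: each supported bus width becomes one bit in src_addr_widths/dst_addr_widths, so generic code can validate a dma_slave_config without a driver round trip. A sketch of the advertisement, with an example mask:

    #include <linux/bitops.h>
    #include <linux/dmaengine.h>

    #define EXAMPLE_BUSWIDTHS                       \
            (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)  |      \
             BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |      \
             BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

    static void example_advertise_caps(struct dma_device *dd)
    {
            dd->src_addr_widths = EXAMPLE_BUSWIDTHS;
            dd->dst_addr_widths = EXAMPLE_BUSWIDTHS;
            dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
            dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
    }
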
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 2787aba60c6b..d6bba6c636c2 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -232,7 +232,8 @@ enum atc_status {
232 * @save_dscr: for cyclic operations, preserve next descriptor address in 232 * @save_dscr: for cyclic operations, preserve next descriptor address in
233 * the cyclic list on suspend/resume cycle 233 * the cyclic list on suspend/resume cycle
234 * @remain_desc: to save remain desc length 234 * @remain_desc: to save remain desc length
235 * @dma_sconfig: configuration for slave transfers, passed via DMA_SLAVE_CONFIG 235 * @dma_sconfig: configuration for slave transfers, passed via
236 * .device_config
236 * @lock: serializes enqueue/dequeue operations to descriptors lists 237 * @lock: serializes enqueue/dequeue operations to descriptors lists
237 * @active_list: list of descriptors dmaengine is being running on 238 * @active_list: list of descriptors dmaengine is being running on
238 * @queue: list of descriptors ready to be submitted to engine 239 * @queue: list of descriptors ready to be submitted to engine
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b60d77a22df6..09e2825a547a 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -25,6 +25,7 @@
 #include <linux/dmapool.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/of_dma.h>
@@ -174,6 +175,13 @@
 
 #define AT_XDMAC_MAX_CHAN	0x20
 
+#define AT_XDMAC_DMA_BUSWIDTHS\
+	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
+	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
 enum atc_status {
 	AT_XDMAC_CHAN_IS_CYCLIC = 0,
 	AT_XDMAC_CHAN_IS_PAUSED,
@@ -184,15 +192,15 @@ struct at_xdmac_chan {
 	struct dma_chan		chan;
 	void __iomem		*ch_regs;
 	u32			mask;		/* Channel Mask */
-	u32			cfg[3];		/* Channel Configuration Register */
-#define	AT_XDMAC_CUR_CFG	0	/* Current channel conf */
-#define	AT_XDMAC_DEV_TO_MEM_CFG	1	/* Predifined dev to mem channel conf */
-#define	AT_XDMAC_MEM_TO_DEV_CFG	2	/* Predifined mem to dev channel conf */
+	u32			cfg[2];		/* Channel Configuration Register */
+#define	AT_XDMAC_DEV_TO_MEM_CFG	0	/* Predifined dev to mem channel conf */
+#define	AT_XDMAC_MEM_TO_DEV_CFG	1	/* Predifined mem to dev channel conf */
 	u8			perid;	/* Peripheral ID */
 	u8			perif;	/* Peripheral Interface */
 	u8			memif;	/* Memory Interface */
 	u32			per_src_addr;
 	u32			per_dst_addr;
+	u32			save_cc;
 	u32			save_cim;
 	u32			save_cnda;
 	u32			save_cndc;
@@ -344,20 +352,13 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
 	at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);
 
 	/*
-	 * When doing memory to memory transfer we need to use the next
+	 * When doing non cyclic transfer we need to use the next
 	 * descriptor view 2 since some fields of the configuration register
 	 * depend on transfer size and src/dest addresses.
 	 */
-	if (is_slave_direction(first->direction)) {
+	if (at_xdmac_chan_is_cyclic(atchan)) {
 		reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
-		if (first->direction == DMA_MEM_TO_DEV)
-			atchan->cfg[AT_XDMAC_CUR_CFG] =
-				atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
-		else
-			atchan->cfg[AT_XDMAC_CUR_CFG] =
-				atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
-		at_xdmac_chan_write(atchan, AT_XDMAC_CC,
-				    atchan->cfg[AT_XDMAC_CUR_CFG]);
+		at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
 	} else {
 		/*
 		 * No need to write AT_XDMAC_CC reg, it will be done when the
@@ -561,7 +562,6 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct at_xdmac_desc	*first = NULL, *prev = NULL;
 	struct scatterlist	*sg;
 	int			i;
-	u32			cfg;
 	unsigned int		xfer_size = 0;
 
 	if (!sgl)
@@ -583,7 +583,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	/* Prepare descriptors. */
 	for_each_sg(sgl, sg, sg_len, i) {
 		struct at_xdmac_desc	*desc = NULL;
-		u32			len, mem;
+		u32			len, mem, dwidth, fixed_dwidth;
 
 		len = sg_dma_len(sg);
 		mem = sg_dma_address(sg);
@@ -608,17 +608,21 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		if (direction == DMA_DEV_TO_MEM) {
 			desc->lld.mbr_sa = atchan->per_src_addr;
 			desc->lld.mbr_da = mem;
-			cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
+			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
 		} else {
 			desc->lld.mbr_sa = mem;
 			desc->lld.mbr_da = atchan->per_dst_addr;
-			cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
 		}
-		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1	/* next descriptor view */
-			| AT_XDMAC_MBR_UBC_NDEN			/* next descriptor dst parameter update */
-			| AT_XDMAC_MBR_UBC_NSEN			/* next descriptor src parameter update */
-			| (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE)	/* descriptor fetch */
-			| len / (1 << at_xdmac_get_dwidth(cfg)); /* microblock length */
+		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
+		fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
+			       ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
+			       : AT_XDMAC_CC_DWIDTH_BYTE;
+		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2	/* next descriptor view */
+			| AT_XDMAC_MBR_UBC_NDEN			/* next descriptor dst parameter update */
+			| AT_XDMAC_MBR_UBC_NSEN			/* next descriptor src parameter update */
+			| (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE)	/* descriptor fetch */
+			| (len >> fixed_dwidth);		/* microblock length */
 		dev_dbg(chan2dev(chan),
 			 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
 			 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
@@ -882,7 +886,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	enum dma_status		ret;
 	int			residue;
 	u32			cur_nda, mask, value;
-	u8			dwidth = at_xdmac_get_dwidth(atchan->cfg[AT_XDMAC_CUR_CFG]);
+	u8			dwidth = 0;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_COMPLETE)
@@ -912,7 +916,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	 */
 	mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
 	value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
-	if ((atchan->cfg[AT_XDMAC_CUR_CFG] & mask) == value) {
+	if ((desc->lld.mbr_cfg & mask) == value) {
 		at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
 		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
 			cpu_relax();
@@ -926,6 +930,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	 */
 	descs_list = &desc->descs_list;
 	list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
+		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
 		residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
 		if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
 			break;
@@ -1107,58 +1112,80 @@ static void at_xdmac_issue_pending(struct dma_chan *chan)
 	return;
 }
 
-static int at_xdmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			    unsigned long arg)
+static int at_xdmac_device_config(struct dma_chan *chan,
+				  struct dma_slave_config *config)
+{
+	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+	int ret;
+
+	dev_dbg(chan2dev(chan), "%s\n", __func__);
+
+	spin_lock_bh(&atchan->lock);
+	ret = at_xdmac_set_slave_config(chan, config);
+	spin_unlock_bh(&atchan->lock);
+
+	return ret;
+}
+
+static int at_xdmac_device_pause(struct dma_chan *chan)
 {
-	struct at_xdmac_desc	*desc, *_desc;
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
-	int			ret = 0;
 
-	dev_dbg(chan2dev(chan), "%s: cmd=%d\n", __func__, cmd);
+	dev_dbg(chan2dev(chan), "%s\n", __func__);
+
+	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
+		return 0;
 
 	spin_lock_bh(&atchan->lock);
+	at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
+	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
+	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
+		cpu_relax();
+	spin_unlock_bh(&atchan->lock);
 
-	switch (cmd) {
-	case DMA_PAUSE:
-		at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
-		set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
-		break;
+	return 0;
+}
 
-	case DMA_RESUME:
-		if (!at_xdmac_chan_is_paused(atchan))
-			break;
+static int at_xdmac_device_resume(struct dma_chan *chan)
+{
+	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
 
-		at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
-		clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
-		break;
+	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	case DMA_TERMINATE_ALL:
-		at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
-		while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
-			cpu_relax();
+	spin_lock_bh(&atchan->lock);
+	if (!at_xdmac_chan_is_paused(atchan))
+		return 0;
+
+	at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
+	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+	spin_unlock_bh(&atchan->lock);
 
-		/* Cancel all pending transfers. */
-		list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
-			at_xdmac_remove_xfer(atchan, desc);
+	return 0;
+}
+
+static int at_xdmac_device_terminate_all(struct dma_chan *chan)
+{
+	struct at_xdmac_desc	*desc, *_desc;
+	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
 
-		clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
-		break;
+	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	case DMA_SLAVE_CONFIG:
-		ret = at_xdmac_set_slave_config(chan,
-				(struct dma_slave_config *)arg);
-		break;
+	spin_lock_bh(&atchan->lock);
+	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
+		cpu_relax();
 
-	default:
-		dev_err(chan2dev(chan),
-			"unmanaged or unknown dma control cmd: %d\n", cmd);
-		ret = -ENXIO;
-	}
+	/* Cancel all pending transfers. */
+	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
+		at_xdmac_remove_xfer(atchan, desc);
 
+	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
 	spin_unlock_bh(&atchan->lock);
 
-	return ret;
+	return 0;
 }
 
 static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
@@ -1217,27 +1244,6 @@ static void at_xdmac_free_chan_resources(struct dma_chan *chan)
 	return;
 }
 
-#define AT_XDMAC_DMA_BUSWIDTHS\
-	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
-	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
-	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
-	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
-	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
-
-static int at_xdmac_device_slave_caps(struct dma_chan *dchan,
-				      struct dma_slave_caps *caps)
-{
-
-	caps->src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
-	caps->dstn_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
-	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	caps->cmd_pause = true;
-	caps->cmd_terminate = true;
-	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static int atmel_xdmac_prepare(struct device *dev)
 {
@@ -1268,9 +1274,10 @@ static int atmel_xdmac_suspend(struct device *dev)
 	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
 		struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 
+		atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
 		if (at_xdmac_chan_is_cyclic(atchan)) {
 			if (!at_xdmac_chan_is_paused(atchan))
-				at_xdmac_control(chan, DMA_PAUSE, 0);
+				at_xdmac_device_pause(chan);
 			atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
 			atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
 			atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
@@ -1290,7 +1297,6 @@ static int atmel_xdmac_resume(struct device *dev)
 	struct at_xdmac_chan	*atchan;
 	struct dma_chan		*chan, *_chan;
 	int			i;
-	u32			cfg;
 
 	clk_prepare_enable(atxdmac->clk);
 
@@ -1305,8 +1311,7 @@ static int atmel_xdmac_resume(struct device *dev)
 	at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs);
 	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
 		atchan = to_at_xdmac_chan(chan);
-		cfg = atchan->cfg[AT_XDMAC_CUR_CFG];
-		at_xdmac_chan_write(atchan, AT_XDMAC_CC, cfg);
+		at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
 		if (at_xdmac_chan_is_cyclic(atchan)) {
 			at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
 			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
@@ -1407,8 +1412,14 @@ static int at_xdmac_probe(struct platform_device *pdev)
 	atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
 	atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
 	atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
-	atxdmac->dma.device_control = at_xdmac_control;
-	atxdmac->dma.device_slave_caps = at_xdmac_device_slave_caps;
+	atxdmac->dma.device_config = at_xdmac_device_config;
+	atxdmac->dma.device_pause = at_xdmac_device_pause;
+	atxdmac->dma.device_resume = at_xdmac_device_resume;
+	atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all;
+	atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
+	atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
+	atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 
 	/* Disable all chans and interrupts. */
 	at_xdmac_off(atxdmac);
@@ -1507,7 +1518,6 @@ static struct platform_driver at_xdmac_driver = {
 	.remove		= at_xdmac_remove,
 	.driver = {
 		.name		= "at_xdmac",
-		.owner		= THIS_MODULE,
 		.of_match_table	= of_match_ptr(atmel_xdmac_dt_ids),
 		.pm		= &atmel_xdmac_dev_pm_ops,
 	}
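
Taken together, the at_xdmac hunks show the shape every driver in this series converges on: one callback per operation plus generic capability fields filled in at probe time, replacing the single device_control multiplexer. A condensed sketch of the pattern; all foo_* names are hypothetical:

	static int foo_config(struct dma_chan *chan, struct dma_slave_config *cfg)
	{
		/* validate cfg, stash it in the channel */
		return 0;
	}

	static int foo_pause(struct dma_chan *chan)		{ /* quiesce HW */ return 0; }
	static int foo_resume(struct dma_chan *chan)		{ /* restart HW */ return 0; }
	static int foo_terminate_all(struct dma_chan *chan)	{ /* abort, free descs */ return 0; }

	/* at probe time: */
	dd->device_config = foo_config;
	dd->device_pause = foo_pause;
	dd->device_resume = foo_resume;
	dd->device_terminate_all = foo_terminate_all;
	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
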
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 918b7b3f766f..0723096fb50a 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -436,9 +436,11 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
 	return vchan_tx_prep(&c->vc, &d->vd, flags);
 }
 
-static int bcm2835_dma_slave_config(struct bcm2835_chan *c,
+static int bcm2835_dma_slave_config(struct dma_chan *chan,
 		struct dma_slave_config *cfg)
 {
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
 	if ((cfg->direction == DMA_DEV_TO_MEM &&
 	     cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
 	    (cfg->direction == DMA_MEM_TO_DEV &&
@@ -452,8 +454,9 @@ static int bcm2835_dma_slave_config(struct bcm2835_chan *c,
 	return 0;
 }
 
-static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
+static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 {
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
 	struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
 	unsigned long flags;
 	int timeout = 10000;
@@ -495,24 +498,6 @@ static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
 	return 0;
 }
 
-static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-	unsigned long arg)
-{
-	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
-
-	switch (cmd) {
-	case DMA_SLAVE_CONFIG:
-		return bcm2835_dma_slave_config(c,
-				(struct dma_slave_config *)arg);
-
-	case DMA_TERMINATE_ALL:
-		return bcm2835_dma_terminate_all(c);
-
-	default:
-		return -ENXIO;
-	}
-}
-
 static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
 {
 	struct bcm2835_chan *c;
@@ -565,18 +550,6 @@ static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
 	return chan;
 }
 
-static int bcm2835_dma_device_slave_caps(struct dma_chan *dchan,
-	struct dma_slave_caps *caps)
-{
-	caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
-	caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
-	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	caps->cmd_pause = false;
-	caps->cmd_terminate = true;
-
-	return 0;
-}
-
 static int bcm2835_dma_probe(struct platform_device *pdev)
 {
 	struct bcm2835_dmadev *od;
@@ -615,9 +588,12 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
 	od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
 	od->ddev.device_tx_status = bcm2835_dma_tx_status;
 	od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
-	od->ddev.device_slave_caps = bcm2835_dma_device_slave_caps;
 	od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
-	od->ddev.device_control = bcm2835_dma_control;
+	od->ddev.device_config = bcm2835_dma_slave_config;
+	od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
+	od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 	od->ddev.dev = &pdev->dev;
 	INIT_LIST_HEAD(&od->ddev.channels);
 	spin_lock_init(&od->lock);
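
Since every new callback receives a bare struct dma_chan *, drivers such as bcm2835 that formerly got their private channel type now unwrap it themselves. The standard idiom embeds the generic channel in the driver's own structure and recovers it with container_of(); a sketch with a hypothetical foo_chan:

	struct foo_chan {
		struct dma_chan chan;	/* generic part registered with the core */
		void __iomem *regs;	/* driver-private state */
	};

	static inline struct foo_chan *to_foo_chan(struct dma_chan *c)
	{
		return container_of(c, struct foo_chan, chan);
	}
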
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index e88588d8ecd3..fd22dd36985f 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1690,7 +1690,7 @@ static u32 coh901318_get_bytes_left(struct dma_chan *chan)
  * Pauses a transfer without losing data. Enables power save.
  * Use this function in conjunction with coh901318_resume.
  */
-static void coh901318_pause(struct dma_chan *chan)
+static int coh901318_pause(struct dma_chan *chan)
 {
 	u32 val;
 	unsigned long flags;
@@ -1730,12 +1730,13 @@ static void coh901318_pause(struct dma_chan *chan)
 	enable_powersave(cohc);
 
 	spin_unlock_irqrestore(&cohc->lock, flags);
+	return 0;
 }
 
 /* Resumes a transfer that has been stopped via 300_dma_stop(..).
    Power save is handled.
 */
-static void coh901318_resume(struct dma_chan *chan)
+static int coh901318_resume(struct dma_chan *chan)
 {
 	u32 val;
 	unsigned long flags;
@@ -1760,6 +1761,7 @@ static void coh901318_resume(struct dma_chan *chan)
 	}
 
 	spin_unlock_irqrestore(&cohc->lock, flags);
+	return 0;
 }
 
 bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
@@ -2114,6 +2116,57 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static int coh901318_terminate_all(struct dma_chan *chan)
+{
+	unsigned long flags;
+	struct coh901318_chan *cohc = to_coh901318_chan(chan);
+	struct coh901318_desc *cohd;
+	void __iomem *virtbase = cohc->base->virtbase;
+
+	/* The remainder of this function terminates the transfer */
+	coh901318_pause(chan);
+	spin_lock_irqsave(&cohc->lock, flags);
+
+	/* Clear any pending BE or TC interrupt */
+	if (cohc->id < 32) {
+		writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
+		writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
+	} else {
+		writel(1 << (cohc->id - 32), virtbase +
+		       COH901318_BE_INT_CLEAR2);
+		writel(1 << (cohc->id - 32), virtbase +
+		       COH901318_TC_INT_CLEAR2);
+	}
+
+	enable_powersave(cohc);
+
+	while ((cohd = coh901318_first_active_get(cohc))) {
+		/* release the lli allocation*/
+		coh901318_lli_free(&cohc->base->pool, &cohd->lli);
+
+		/* return desc to free-list */
+		coh901318_desc_remove(cohd);
+		coh901318_desc_free(cohc, cohd);
+	}
+
+	while ((cohd = coh901318_first_queued(cohc))) {
+		/* release the lli allocation*/
+		coh901318_lli_free(&cohc->base->pool, &cohd->lli);
+
+		/* return desc to free-list */
+		coh901318_desc_remove(cohd);
+		coh901318_desc_free(cohc, cohd);
+	}
+
+
+	cohc->nbr_active_done = 0;
+	cohc->busy = 0;
+
+	spin_unlock_irqrestore(&cohc->lock, flags);
+
+	return 0;
+}
+
 static int coh901318_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct coh901318_chan	*cohc = to_coh901318_chan(chan);
@@ -2156,7 +2209,7 @@ coh901318_free_chan_resources(struct dma_chan *chan)
 
 	spin_unlock_irqrestore(&cohc->lock, flags);
 
-	dmaengine_terminate_all(chan);
+	coh901318_terminate_all(chan);
 }
 
 
@@ -2461,8 +2514,8 @@ static const struct burst_table burst_sizes[] = {
 	},
 };
 
-static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
-					    struct dma_slave_config *config)
+static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
+					   struct dma_slave_config *config)
 {
 	struct coh901318_chan *cohc = to_coh901318_chan(chan);
 	dma_addr_t addr;
@@ -2482,7 +2535,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
 		maxburst = config->dst_maxburst;
 	} else {
 		dev_err(COHC_2_DEV(cohc), "illegal channel mode\n");
-		return;
+		return -EINVAL;
 	}
 
 	dev_dbg(COHC_2_DEV(cohc), "configure channel for %d byte transfers\n",
@@ -2528,7 +2581,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
 	default:
 		dev_err(COHC_2_DEV(cohc),
 			"bad runtimeconfig: alien address width\n");
-		return;
+		return -EINVAL;
 	}
 
 	ctrl |= burst_sizes[i].reg;
@@ -2538,84 +2591,12 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
 
 	cohc->addr = addr;
 	cohc->ctrl = ctrl;
-}
-
-static int
-coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		  unsigned long arg)
-{
-	unsigned long flags;
-	struct coh901318_chan *cohc = to_coh901318_chan(chan);
-	struct coh901318_desc *cohd;
-	void __iomem *virtbase = cohc->base->virtbase;
-
-	if (cmd == DMA_SLAVE_CONFIG) {
-		struct dma_slave_config *config =
-			(struct dma_slave_config *) arg;
-
-		coh901318_dma_set_runtimeconfig(chan, config);
-		return 0;
-	}
-
-	if (cmd == DMA_PAUSE) {
-		coh901318_pause(chan);
-		return 0;
-	}
-
-	if (cmd == DMA_RESUME) {
-		coh901318_resume(chan);
-		return 0;
-	}
-
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
-
-	/* The remainder of this function terminates the transfer */
-	coh901318_pause(chan);
-	spin_lock_irqsave(&cohc->lock, flags);
-
-	/* Clear any pending BE or TC interrupt */
-	if (cohc->id < 32) {
-		writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
-		writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
-	} else {
-		writel(1 << (cohc->id - 32), virtbase +
-		       COH901318_BE_INT_CLEAR2);
-		writel(1 << (cohc->id - 32), virtbase +
-		       COH901318_TC_INT_CLEAR2);
-	}
-
-	enable_powersave(cohc);
-
-	while ((cohd = coh901318_first_active_get(cohc))) {
-		/* release the lli allocation*/
-		coh901318_lli_free(&cohc->base->pool, &cohd->lli);
-
-		/* return desc to free-list */
-		coh901318_desc_remove(cohd);
-		coh901318_desc_free(cohc, cohd);
-	}
-
-	while ((cohd = coh901318_first_queued(cohc))) {
-		/* release the lli allocation*/
-		coh901318_lli_free(&cohc->base->pool, &cohd->lli);
-
-		/* return desc to free-list */
-		coh901318_desc_remove(cohd);
-		coh901318_desc_free(cohc, cohd);
-	}
-
-
-	cohc->nbr_active_done = 0;
-	cohc->busy = 0;
-
-	spin_unlock_irqrestore(&cohc->lock, flags);
 
 	return 0;
 }
 
-void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
+static void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
 			 struct coh901318_base *base)
 {
 	int chans_i;
 	int i = 0;
@@ -2717,7 +2698,10 @@ static int __init coh901318_probe(struct platform_device *pdev)
 	base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
 	base->dma_slave.device_tx_status = coh901318_tx_status;
 	base->dma_slave.device_issue_pending = coh901318_issue_pending;
-	base->dma_slave.device_control = coh901318_control;
+	base->dma_slave.device_config = coh901318_dma_set_runtimeconfig;
+	base->dma_slave.device_pause = coh901318_pause;
+	base->dma_slave.device_resume = coh901318_resume;
+	base->dma_slave.device_terminate_all = coh901318_terminate_all;
 	base->dma_slave.dev = &pdev->dev;
 
 	err = dma_async_device_register(&base->dma_slave);
@@ -2737,7 +2721,10 @@ static int __init coh901318_probe(struct platform_device *pdev)
 	base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
 	base->dma_memcpy.device_tx_status = coh901318_tx_status;
 	base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
-	base->dma_memcpy.device_control = coh901318_control;
+	base->dma_memcpy.device_config = coh901318_dma_set_runtimeconfig;
+	base->dma_memcpy.device_pause = coh901318_pause;
+	base->dma_memcpy.device_resume = coh901318_resume;
+	base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
 	base->dma_memcpy.dev = &pdev->dev;
 	/*
 	 * This controller can only access address at even 32bit boundaries,
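
Promoting coh901318_pause() and coh901318_resume() from void to int lets failures propagate through the generic dmaengine_pause()/dmaengine_resume() wrappers instead of being silently dropped. A hedged client-side sketch:

	int ret = dmaengine_pause(chan);
	if (ret)	/* e.g. -ENOSYS if the driver left device_pause unset */
		dev_warn(dev, "cannot pause DMA channel: %d\n", ret);
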
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index b743adf56465..512cb8e2805e 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -525,12 +525,6 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
 	return &c->txd;
 }
 
-static int cpp41_cfg_chan(struct cppi41_channel *c,
-		struct dma_slave_config *cfg)
-{
-	return 0;
-}
-
 static void cppi41_compute_td_desc(struct cppi41_desc *d)
 {
 	d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
@@ -647,28 +641,6 @@ static int cppi41_stop_chan(struct dma_chan *chan)
 	return 0;
 }
 
-static int cppi41_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-	unsigned long arg)
-{
-	struct cppi41_channel *c = to_cpp41_chan(chan);
-	int ret;
-
-	switch (cmd) {
-	case DMA_SLAVE_CONFIG:
-		ret = cpp41_cfg_chan(c, (struct dma_slave_config *) arg);
-		break;
-
-	case DMA_TERMINATE_ALL:
-		ret = cppi41_stop_chan(chan);
-		break;
-
-	default:
-		ret = -ENXIO;
-		break;
-	}
-	return ret;
-}
-
 static void cleanup_chans(struct cppi41_dd *cdd)
 {
 	while (!list_empty(&cdd->ddev.channels)) {
@@ -953,7 +925,7 @@ static int cppi41_dma_probe(struct platform_device *pdev)
 	cdd->ddev.device_tx_status = cppi41_dma_tx_status;
 	cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
 	cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
-	cdd->ddev.device_control = cppi41_dma_control;
+	cdd->ddev.device_terminate_all = cppi41_stop_chan;
 	cdd->ddev.dev = dev;
 	INIT_LIST_HEAD(&cdd->ddev.channels);
 	cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
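
cppi41 keeps only what its hardware supports: terminate_all is wired up, while the empty slave-config stub is deleted outright rather than converted. The core-side wrappers guard against missing callbacks along these lines (a sketch mirroring the dmaengine helpers of this era):

	static inline int dmaengine_terminate_all(struct dma_chan *chan)
	{
		if (chan->device->device_terminate_all)
			return chan->device->device_terminate_all(chan);

		return -ENOSYS;
	}
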
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index bdeafeefa5f6..4527a3ebeac4 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -210,7 +210,7 @@ static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst)
 }
 
 static int jz4740_dma_slave_config(struct dma_chan *c,
-	const struct dma_slave_config *config)
+	struct dma_slave_config *config)
 {
 	struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
 	struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
@@ -290,21 +290,6 @@ static int jz4740_dma_terminate_all(struct dma_chan *c)
 	return 0;
 }
 
-static int jz4740_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-	unsigned long arg)
-{
-	struct dma_slave_config *config = (struct dma_slave_config *)arg;
-
-	switch (cmd) {
-	case DMA_SLAVE_CONFIG:
-		return jz4740_dma_slave_config(chan, config);
-	case DMA_TERMINATE_ALL:
-		return jz4740_dma_terminate_all(chan);
-	default:
-		return -ENOSYS;
-	}
-}
-
 static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan)
 {
 	struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
@@ -561,7 +546,8 @@ static int jz4740_dma_probe(struct platform_device *pdev)
 	dd->device_issue_pending = jz4740_dma_issue_pending;
 	dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg;
 	dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
-	dd->device_control = jz4740_dma_control;
+	dd->device_config = jz4740_dma_slave_config;
+	dd->device_terminate_all = jz4740_dma_terminate_all;
 	dd->dev = &pdev->dev;
 	INIT_LIST_HEAD(&dd->channels);
 
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index e057935e3023..f15712f2fec6 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -222,31 +222,35 @@ static void balance_ref_count(struct dma_chan *chan)
  */
 static int dma_chan_get(struct dma_chan *chan)
 {
-	int err = -ENODEV;
 	struct module *owner = dma_chan_to_owner(chan);
+	int ret;
 
+	/* The channel is already in use, update client count */
 	if (chan->client_count) {
 		__module_get(owner);
-		err = 0;
-	} else if (try_module_get(owner))
-		err = 0;
+		goto out;
+	}
 
-	if (err == 0)
-		chan->client_count++;
+	if (!try_module_get(owner))
+		return -ENODEV;
 
 	/* allocate upon first client reference */
-	if (chan->client_count == 1 && err == 0) {
-		int desc_cnt = chan->device->device_alloc_chan_resources(chan);
-
-		if (desc_cnt < 0) {
-			err = desc_cnt;
-			chan->client_count = 0;
-			module_put(owner);
-		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
-			balance_ref_count(chan);
+	if (chan->device->device_alloc_chan_resources) {
+		ret = chan->device->device_alloc_chan_resources(chan);
+		if (ret < 0)
+			goto err_out;
 	}
 
-	return err;
+	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
+		balance_ref_count(chan);
+
+out:
+	chan->client_count++;
+	return 0;
+
+err_out:
+	module_put(owner);
+	return ret;
 }
 
 /**
@@ -257,11 +261,15 @@ static int dma_chan_get(struct dma_chan *chan)
  */
 static void dma_chan_put(struct dma_chan *chan)
 {
+	/* This channel is not in use, bail out */
 	if (!chan->client_count)
-		return; /* this channel failed alloc_chan_resources */
+		return;
+
 	chan->client_count--;
 	module_put(dma_chan_to_owner(chan));
-	if (chan->client_count == 0)
+
+	/* This channel is not in use anymore, free it */
+	if (!chan->client_count && chan->device->device_free_chan_resources)
 		chan->device->device_free_chan_resources(chan);
 }
 
@@ -471,6 +479,39 @@ static void dma_channel_rebalance(void)
 	}
 }
 
+int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
+{
+	struct dma_device *device;
+
+	if (!chan || !caps)
+		return -EINVAL;
+
+	device = chan->device;
+
+	/* check if the channel supports slave transactions */
+	if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
+		return -ENXIO;
+
+	/*
+	 * Check whether it reports it uses the generic slave
+	 * capabilities, if not, that means it doesn't support any
+	 * kind of slave capabilities reporting.
+	 */
+	if (!device->directions)
+		return -ENXIO;
+
+	caps->src_addr_widths = device->src_addr_widths;
+	caps->dst_addr_widths = device->dst_addr_widths;
+	caps->directions = device->directions;
+	caps->residue_granularity = device->residue_granularity;
+
+	caps->cmd_pause = !!device->device_pause;
+	caps->cmd_terminate = !!device->device_terminate_all;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dma_get_slave_caps);
+
 static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 					  struct dma_device *dev,
 					  dma_filter_fn fn, void *fn_param)
@@ -811,17 +852,16 @@ int dma_async_device_register(struct dma_device *device)
 		!device->device_prep_dma_sg);
 	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
 		!device->device_prep_dma_cyclic);
-	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
-		!device->device_control);
 	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
 		!device->device_prep_interleaved_dma);
 
-	BUG_ON(!device->device_alloc_chan_resources);
-	BUG_ON(!device->device_free_chan_resources);
 	BUG_ON(!device->device_tx_status);
 	BUG_ON(!device->device_issue_pending);
 	BUG_ON(!device->dev);
 
+	WARN(dma_has_cap(DMA_SLAVE, device->cap_mask) && !device->directions,
+	     "this driver doesn't support generic slave capabilities reporting\n");
+
 	/* note: this only matters in the
 	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
 	 */
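
Because dma_get_slave_caps() now reads generic fields off struct dma_device, a client can query a channel's abilities before choosing a transfer strategy. A small sketch; the helpers called in the branch bodies are hypothetical:

	struct dma_slave_caps caps;

	if (!dma_get_slave_caps(chan, &caps)) {
		if (caps.cmd_pause)
			plan_pause_based_flow();	/* hypothetical */
		if (!(caps.directions & BIT(DMA_MEM_TO_DEV)))
			fall_back_to_pio();		/* hypothetical */
	}
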
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a8d7809e2f4c..220ee49633e4 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -349,14 +349,14 @@ static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
 			 unsigned long data)
 {
 	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
 		 current->comm, n, err, src_off, dst_off, len, data);
 }
 
 #define verbose_result(err, n, src_off, dst_off, len, data) ({	\
 	if (verbose)						\
 		result(err, n, src_off, dst_off, len, data);	\
 	else							\
-		dbg_result(err, n, src_off, dst_off, len, data); \
+		dbg_result(err, n, src_off, dst_off, len, data);\
 })
 
 static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
@@ -405,7 +405,6 @@ static int dmatest_func(void *data)
 	struct dmatest_params	*params;
 	struct dma_chan		*chan;
 	struct dma_device	*dev;
-	unsigned int		src_off, dst_off, len;
 	unsigned int		error_count;
 	unsigned int		failed_tests = 0;
 	unsigned int		total_tests = 0;
@@ -484,6 +483,7 @@ static int dmatest_func(void *data)
 		struct dmaengine_unmap_data *um;
 		dma_addr_t srcs[src_cnt];
 		dma_addr_t *dsts;
+		unsigned int src_off, dst_off, len;
 		u8 align = 0;
 
 		total_tests++;
@@ -502,15 +502,21 @@ static int dmatest_func(void *data)
 			break;
 		}
 
-		if (params->noverify) {
+		if (params->noverify)
 			len = params->buf_size;
+		else
+			len = dmatest_random() % params->buf_size + 1;
+
+		len = (len >> align) << align;
+		if (!len)
+			len = 1 << align;
+
+		total_len += len;
+
+		if (params->noverify) {
 			src_off = 0;
 			dst_off = 0;
 		} else {
-			len = dmatest_random() % params->buf_size + 1;
-			len = (len >> align) << align;
-			if (!len)
-				len = 1 << align;
 			src_off = dmatest_random() % (params->buf_size - len + 1);
 			dst_off = dmatest_random() % (params->buf_size - len + 1);
 
@@ -523,11 +529,6 @@ static int dmatest_func(void *data)
 					  params->buf_size);
 		}
 
-		len = (len >> align) << align;
-		if (!len)
-			len = 1 << align;
-		total_len += len;
-
 		um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt,
 					      GFP_KERNEL);
 		if (!um) {
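
Hoisting the length calculation out of the verification branch gives the noverify path the same alignment clamp as the randomized path: shifting right then left by align clears the low bits, and a zero result is bumped up to one aligned unit. The arithmetic in isolation, as a standalone C check:

	#include <stdio.h>

	/* Round len down to a multiple of (1 << align); never return 0. */
	static unsigned int clamp_len(unsigned int len, unsigned int align)
	{
		len = (len >> align) << align;
		if (!len)
			len = 1 << align;
		return len;
	}

	int main(void)
	{
		printf("%u %u %u\n",
		       clamp_len(1000, 3),	/* 1000: already 8-aligned */
		       clamp_len(1001, 3),	/* 1001 -> 1000 */
		       clamp_len(5, 3));	/* 5 -> 8, the minimum unit */
		return 0;
	}
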
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 5c062548957c..455b7a4f1e87 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -61,6 +61,13 @@
  */
 #define NR_DESCS_PER_CHANNEL	64
 
+/* The set of bus widths supported by the DMA controller */
+#define DW_DMA_BUSWIDTHS			  \
+	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		| \
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		| \
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
+
 /*----------------------------------------------------------------------*/
 
 static struct device *chan2dev(struct dma_chan *chan)
@@ -955,8 +962,7 @@ static inline void convert_burst(u32 *maxburst)
 		*maxburst = 0;
 }
 
-static int
-set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 
@@ -973,16 +979,25 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 	return 0;
 }
 
-static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
+static int dwc_pause(struct dma_chan *chan)
 {
-	u32 cfglo = channel_readl(dwc, CFG_LO);
-	unsigned int count = 20;	/* timeout iterations */
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	unsigned long flags;
+	unsigned int count = 20;	/* timeout iterations */
+	u32 cfglo;
+
+	spin_lock_irqsave(&dwc->lock, flags);
 
+	cfglo = channel_readl(dwc, CFG_LO);
 	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
 	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
 		udelay(2);
 
 	dwc->paused = true;
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return 0;
 }
 
 static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
@@ -994,53 +1009,48 @@ static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
 	dwc->paused = false;
 }
 
-static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		       unsigned long arg)
+static int dwc_resume(struct dma_chan *chan)
 {
 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
-	struct dw_dma		*dw = to_dw_dma(chan->device);
-	struct dw_desc		*desc, *_desc;
 	unsigned long		flags;
-	LIST_HEAD(list);
 
-	if (cmd == DMA_PAUSE) {
-		spin_lock_irqsave(&dwc->lock, flags);
+	if (!dwc->paused)
+		return 0;
 
-		dwc_chan_pause(dwc);
+	spin_lock_irqsave(&dwc->lock, flags);
 
-		spin_unlock_irqrestore(&dwc->lock, flags);
-	} else if (cmd == DMA_RESUME) {
-		if (!dwc->paused)
-			return 0;
+	dwc_chan_resume(dwc);
 
-		spin_lock_irqsave(&dwc->lock, flags);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
-		dwc_chan_resume(dwc);
+	return 0;
+}
 
-		spin_unlock_irqrestore(&dwc->lock, flags);
-	} else if (cmd == DMA_TERMINATE_ALL) {
-		spin_lock_irqsave(&dwc->lock, flags);
+static int dwc_terminate_all(struct dma_chan *chan)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	struct dw_dma *dw = to_dw_dma(chan->device);
+	struct dw_desc *desc, *_desc;
+	unsigned long flags;
+	LIST_HEAD(list);
 
-		clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+	spin_lock_irqsave(&dwc->lock, flags);
 
-		dwc_chan_disable(dw, dwc);
+	clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+
+	dwc_chan_disable(dw, dwc);
 
-		dwc_chan_resume(dwc);
+	dwc_chan_resume(dwc);
 
-		/* active_list entries will end up before queued entries */
-		list_splice_init(&dwc->queue, &list);
-		list_splice_init(&dwc->active_list, &list);
+	/* active_list entries will end up before queued entries */
+	list_splice_init(&dwc->queue, &list);
+	list_splice_init(&dwc->active_list, &list);
 
-		spin_unlock_irqrestore(&dwc->lock, flags);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
-		/* Flush all pending and queued descriptors */
-		list_for_each_entry_safe(desc, _desc, &list, desc_node)
-			dwc_descriptor_complete(dwc, desc, false);
-	} else if (cmd == DMA_SLAVE_CONFIG) {
-		return set_runtime_config(chan, (struct dma_slave_config *)arg);
-	} else {
-		return -ENXIO;
-	}
+	/* Flush all pending and queued descriptors */
+	list_for_each_entry_safe(desc, _desc, &list, desc_node)
+		dwc_descriptor_complete(dwc, desc, false);
 
 	return 0;
 }
@@ -1551,7 +1561,8 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 		}
 	} else {
 		dw->nr_masters = pdata->nr_masters;
-		memcpy(dw->data_width, pdata->data_width, 4);
+		for (i = 0; i < dw->nr_masters; i++)
+			dw->data_width[i] = pdata->data_width[i];
 	}
 
 	/* Calculate all channel mask before DMA setup */
@@ -1656,13 +1667,23 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 	dw->dma.device_free_chan_resources = dwc_free_chan_resources;
 
 	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
-
 	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
-	dw->dma.device_control = dwc_control;
+
+	dw->dma.device_config = dwc_config;
+	dw->dma.device_pause = dwc_pause;
+	dw->dma.device_resume = dwc_resume;
+	dw->dma.device_terminate_all = dwc_terminate_all;
 
 	dw->dma.device_tx_status = dwc_tx_status;
 	dw->dma.device_issue_pending = dwc_issue_pending;
 
+	/* DMA capabilities */
+	dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
+	dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
+	dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
+			     BIT(DMA_MEM_TO_MEM);
+	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
 	err = dma_async_device_register(&dw->dma);
 	if (err)
 		goto err_dma_register;
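
DW_DMA_BUSWIDTHS folds the supported transfer widths into a single bitmask, one BIT() per dma_slave_buswidth value (the enum encodes the byte count), which the core can then intersect with a requested width. A standalone illustration of the encoding:

	#include <stdio.h>

	#define BIT(n) (1UL << (n))

	/* Mirrors dma_slave_buswidth: the value is the width in bytes. */
	enum buswidth { UNDEFINED = 0, ONE_BYTE = 1, TWO_BYTES = 2, FOUR_BYTES = 4 };

	int main(void)
	{
		unsigned long widths = BIT(UNDEFINED) | BIT(ONE_BYTE) |
				       BIT(TWO_BYTES) | BIT(FOUR_BYTES);

		/* A 4-byte request is supported iff its bit is set. */
		printf("4-byte ok: %s\n", (widths & BIT(FOUR_BYTES)) ? "yes" : "no");
		return 0;
	}
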
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 32ea1aca7a0e..6565a361e7e5 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -100,7 +100,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
 	struct dw_dma_platform_data *pdata;
-	u32 tmp, arr[4];
+	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
 
 	if (!np) {
 		dev_err(&pdev->dev, "Missing DT data\n");
@@ -127,7 +127,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
 	pdata->block_size = tmp;
 
 	if (!of_property_read_u32(np, "dma-masters", &tmp)) {
-		if (tmp > 4)
+		if (tmp > DW_DMA_MAX_NR_MASTERS)
 			return NULL;
 
 		pdata->nr_masters = tmp;
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 848e232f7cc7..241ff2b1402b 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -252,7 +252,7 @@ struct dw_dma_chan {
 	u8			src_master;
 	u8			dst_master;
 
-	/* configuration passed via DMA_SLAVE_CONFIG */
+	/* configuration passed via .device_config */
 	struct dma_slave_config dma_sconfig;
 };
 
@@ -285,7 +285,7 @@ struct dw_dma {
 
 	/* hardware configuration */
 	unsigned char		nr_masters;
-	unsigned char		data_width[4];
+	unsigned char		data_width[DW_DMA_MAX_NR_MASTERS];
 };
 
 static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index b969206439b7..276157f22612 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -15,6 +15,7 @@
 
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/edma.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -244,8 +245,9 @@ static void edma_execute(struct edma_chan *echan)
 	}
 }
 
-static int edma_terminate_all(struct edma_chan *echan)
+static int edma_terminate_all(struct dma_chan *chan)
 {
+	struct edma_chan *echan = to_edma_chan(chan);
 	unsigned long flags;
 	LIST_HEAD(head);
 
@@ -273,9 +275,11 @@ static int edma_terminate_all(struct edma_chan *echan)
 	return 0;
 }
 
-static int edma_slave_config(struct edma_chan *echan,
+static int edma_slave_config(struct dma_chan *chan,
 	struct dma_slave_config *cfg)
 {
+	struct edma_chan *echan = to_edma_chan(chan);
+
 	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
 	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
 		return -EINVAL;
@@ -285,8 +289,10 @@ static int edma_slave_config(struct edma_chan *echan,
 	return 0;
 }
 
-static int edma_dma_pause(struct edma_chan *echan)
+static int edma_dma_pause(struct dma_chan *chan)
 {
+	struct edma_chan *echan = to_edma_chan(chan);
+
 	/* Pause/Resume only allowed with cyclic mode */
 	if (!echan->edesc || !echan->edesc->cyclic)
 		return -EINVAL;
@@ -295,8 +301,10 @@ static int edma_dma_pause(struct edma_chan *echan)
 	return 0;
 }
 
-static int edma_dma_resume(struct edma_chan *echan)
+static int edma_dma_resume(struct dma_chan *chan)
 {
+	struct edma_chan *echan = to_edma_chan(chan);
+
 	/* Pause/Resume only allowed with cyclic mode */
 	if (!echan->edesc->cyclic)
 		return -EINVAL;
@@ -305,36 +313,6 @@ static int edma_dma_resume(struct edma_chan *echan)
 	return 0;
 }
 
-static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			unsigned long arg)
-{
-	int ret = 0;
-	struct dma_slave_config *config;
-	struct edma_chan *echan = to_edma_chan(chan);
-
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		edma_terminate_all(echan);
-		break;
-	case DMA_SLAVE_CONFIG:
-		config = (struct dma_slave_config *)arg;
-		ret = edma_slave_config(echan, config);
-		break;
-	case DMA_PAUSE:
-		ret = edma_dma_pause(echan);
-		break;
-
-	case DMA_RESUME:
-		ret = edma_dma_resume(echan);
-		break;
-
-	default:
-		ret = -ENOSYS;
-	}
-
-	return ret;
-}
-
 /*
  * A PaRAM set configuration abstraction used by other modes
  * @chan: Channel who's PaRAM set we're configuring
@@ -557,7 +535,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
 }
 
-struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
+static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	size_t len, unsigned long tx_flags)
 {
@@ -994,19 +972,6 @@ static void __init edma_chan_init(struct edma_cc *ecc,
 	BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
 	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 
-static int edma_dma_device_slave_caps(struct dma_chan *dchan,
-				      struct dma_slave_caps *caps)
-{
-	caps->src_addr_widths = EDMA_DMA_BUSWIDTHS;
-	caps->dstn_addr_widths = EDMA_DMA_BUSWIDTHS;
-	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	caps->cmd_pause = true;
-	caps->cmd_terminate = true;
-	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-
-	return 0;
-}
-
 static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
 			  struct device *dev)
 {
@@ -1017,8 +982,16 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
 	dma->device_free_chan_resources = edma_free_chan_resources;
 	dma->device_issue_pending = edma_issue_pending;
 	dma->device_tx_status = edma_tx_status;
-	dma->device_control = edma_control;
-	dma->device_slave_caps = edma_dma_device_slave_caps;
+	dma->device_config = edma_slave_config;
+	dma->device_pause = edma_dma_pause;
+	dma->device_resume = edma_dma_resume;
+	dma->device_terminate_all = edma_terminate_all;
+
+	dma->src_addr_widths = EDMA_DMA_BUSWIDTHS;
+	dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
+	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
 	dma->dev = dev;
 
 	/*
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 7650470196c4..24e5290faa32 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -144,7 +144,7 @@ struct ep93xx_dma_desc {
  * @queue: pending descriptors which are handled next
  * @free_list: list of free descriptors which can be used
  * @runtime_addr: physical address currently used as dest/src (M2M only). This
- *                is set via %DMA_SLAVE_CONFIG before slave operation is
+ *                is set via .device_config before slave operation is
  *                prepared
  * @runtime_ctrl: M2M runtime values for the control register.
  *
@@ -1164,13 +1164,14 @@ fail:
 
 /**
  * ep93xx_dma_terminate_all - terminate all transactions
- * @edmac: channel
+ * @chan: channel
  *
  * Stops all DMA transactions. All descriptors are put back to the
  * @edmac->free_list and callbacks are _not_ called.
  */
-static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
+static int ep93xx_dma_terminate_all(struct dma_chan *chan)
 {
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
 	struct ep93xx_dma_desc *desc, *_d;
 	unsigned long flags;
 	LIST_HEAD(list);
@@ -1194,9 +1195,10 @@ static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
 	return 0;
 }
 
-static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
+static int ep93xx_dma_slave_config(struct dma_chan *chan,
 				   struct dma_slave_config *config)
 {
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
 	enum dma_slave_buswidth width;
 	unsigned long flags;
 	u32 addr, ctrl;
@@ -1242,36 +1244,6 @@ static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
 }
 
 /**
- * ep93xx_dma_control - manipulate all pending operations on a channel
- * @chan: channel
- * @cmd: control command to perform
- * @arg: optional argument
- *
- * Controls the channel. Function returns %0 in case of success or negative
- * error in case of failure.
- */
-static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			      unsigned long arg)
-{
-	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
-	struct dma_slave_config *config;
-
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		return ep93xx_dma_terminate_all(edmac);
-
-	case DMA_SLAVE_CONFIG:
-		config = (struct dma_slave_config *)arg;
-		return ep93xx_dma_slave_config(edmac, config);
-
-	default:
-		break;
-	}
-
-	return -ENOSYS;
-}
-
-/**
  * ep93xx_dma_tx_status - check if a transaction is completed
  * @chan: channel
  * @cookie: transaction specific cookie
@@ -1352,7 +1324,8 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
 	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
 	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
 	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
-	dma_dev->device_control = ep93xx_dma_control;
+	dma_dev->device_config = ep93xx_dma_slave_config;
+	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
 	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
 	dma_dev->device_tx_status = ep93xx_dma_tx_status;
 
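ep93xx shows the signature change that goes with the split: each new callback takes the generic struct dma_chan and recovers the driver's channel with container_of(), where the old helpers took the driver type directly. Sketch only, with hypothetical foo_* names:

struct foo_chan {
	struct dma_chan chan;
	/* driver-private channel state */
};

static inline struct foo_chan *to_foo_chan(struct dma_chan *c)
{
	return container_of(c, struct foo_chan, chan);
}

static int foo_terminate_all(struct dma_chan *c)
{
	struct foo_chan *fc = to_foo_chan(c);

	/* halt the hardware and reclaim fc's descriptors here */
	return 0;
}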
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index e9ebb89e1711..09e2842d15ec 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -289,62 +289,69 @@ static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
 	kfree(fsl_desc);
 }
 
-static int fsl_edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		unsigned long arg)
+static int fsl_edma_terminate_all(struct dma_chan *chan)
 {
 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-	struct dma_slave_config *cfg = (void *)arg;
 	unsigned long flags;
 	LIST_HEAD(head);
 
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+	fsl_edma_disable_request(fsl_chan);
+	fsl_chan->edesc = NULL;
+	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+	return 0;
+}
+
+static int fsl_edma_pause(struct dma_chan *chan)
+{
+	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+	if (fsl_chan->edesc) {
 		fsl_edma_disable_request(fsl_chan);
-		fsl_chan->edesc = NULL;
-		vchan_get_all_descriptors(&fsl_chan->vchan, &head);
-		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-		vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
-		return 0;
-
-	case DMA_SLAVE_CONFIG:
-		fsl_chan->fsc.dir = cfg->direction;
-		if (cfg->direction == DMA_DEV_TO_MEM) {
-			fsl_chan->fsc.dev_addr = cfg->src_addr;
-			fsl_chan->fsc.addr_width = cfg->src_addr_width;
-			fsl_chan->fsc.burst = cfg->src_maxburst;
-			fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
-		} else if (cfg->direction == DMA_MEM_TO_DEV) {
-			fsl_chan->fsc.dev_addr = cfg->dst_addr;
-			fsl_chan->fsc.addr_width = cfg->dst_addr_width;
-			fsl_chan->fsc.burst = cfg->dst_maxburst;
-			fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
-		} else {
-			return -EINVAL;
-		}
-		return 0;
+		fsl_chan->status = DMA_PAUSED;
+	}
+	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+	return 0;
+}
 
-	case DMA_PAUSE:
-		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
-		if (fsl_chan->edesc) {
-			fsl_edma_disable_request(fsl_chan);
-			fsl_chan->status = DMA_PAUSED;
-		}
-		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-		return 0;
-
-	case DMA_RESUME:
-		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
-		if (fsl_chan->edesc) {
-			fsl_edma_enable_request(fsl_chan);
-			fsl_chan->status = DMA_IN_PROGRESS;
-		}
-		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-		return 0;
+static int fsl_edma_resume(struct dma_chan *chan)
+{
+	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+	unsigned long flags;
 
-	default:
-		return -ENXIO;
+	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+	if (fsl_chan->edesc) {
+		fsl_edma_enable_request(fsl_chan);
+		fsl_chan->status = DMA_IN_PROGRESS;
+	}
+	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+	return 0;
+}
+
+static int fsl_edma_slave_config(struct dma_chan *chan,
+				 struct dma_slave_config *cfg)
+{
+	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+
+	fsl_chan->fsc.dir = cfg->direction;
+	if (cfg->direction == DMA_DEV_TO_MEM) {
+		fsl_chan->fsc.dev_addr = cfg->src_addr;
+		fsl_chan->fsc.addr_width = cfg->src_addr_width;
+		fsl_chan->fsc.burst = cfg->src_maxburst;
+		fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
+	} else if (cfg->direction == DMA_MEM_TO_DEV) {
+		fsl_chan->fsc.dev_addr = cfg->dst_addr;
+		fsl_chan->fsc.addr_width = cfg->dst_addr_width;
+		fsl_chan->fsc.burst = cfg->dst_maxburst;
+		fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
+	} else {
+		return -EINVAL;
 	}
+	return 0;
 }
 
 static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
@@ -780,18 +787,6 @@ static void fsl_edma_free_chan_resources(struct dma_chan *chan)
 	fsl_chan->tcd_pool = NULL;
 }
 
-static int fsl_dma_device_slave_caps(struct dma_chan *dchan,
-		struct dma_slave_caps *caps)
-{
-	caps->src_addr_widths = FSL_EDMA_BUSWIDTHS;
-	caps->dstn_addr_widths = FSL_EDMA_BUSWIDTHS;
-	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	caps->cmd_pause = true;
-	caps->cmd_terminate = true;
-
-	return 0;
-}
-
 static int
 fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
 {
@@ -917,9 +912,15 @@ static int fsl_edma_probe(struct platform_device *pdev)
 	fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
 	fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
 	fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
-	fsl_edma->dma_dev.device_control = fsl_edma_control;
+	fsl_edma->dma_dev.device_config = fsl_edma_slave_config;
+	fsl_edma->dma_dev.device_pause = fsl_edma_pause;
+	fsl_edma->dma_dev.device_resume = fsl_edma_resume;
+	fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
 	fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
-	fsl_edma->dma_dev.device_slave_caps = fsl_dma_device_slave_caps;
+
+	fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
+	fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
+	fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 
 	platform_set_drvdata(pdev, fsl_edma);
 
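fsl-edma's new pause and resume callbacks both follow the virt-dma idiom: take the channel's vchan.lock, touch the hardware only if a descriptor is actually in flight, and record the new channel status. A hedged sketch of that shape, with foo_hw_stop() standing in for whatever gates the request line:

static int foo_pause(struct dma_chan *c)
{
	struct foo_chan *fc = to_foo_chan(c);	/* hypothetical, as above */
	unsigned long flags;

	spin_lock_irqsave(&fc->vchan.lock, flags);
	if (fc->edesc) {	/* only act if a transfer is in flight */
		foo_hw_stop(fc);
		fc->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&fc->vchan.lock, flags);
	return 0;
}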
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 38821cdf862b..300f821f1890 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -941,84 +941,56 @@ fail:
 	return NULL;
 }
 
-/**
- * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
- * @chan: DMA channel
- * @sgl: scatterlist to transfer to/from
- * @sg_len: number of entries in @scatterlist
- * @direction: DMA direction
- * @flags: DMAEngine flags
- * @context: transaction context (ignored)
- *
- * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
- * DMA_SLAVE API, this gets the device-specific information from the
- * chan->private variable.
- */
-static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
-	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
-	enum dma_transfer_direction direction, unsigned long flags,
-	void *context)
+static int fsl_dma_device_terminate_all(struct dma_chan *dchan)
 {
-	/*
-	 * This operation is not supported on the Freescale DMA controller
-	 *
-	 * However, we need to provide the function pointer to allow the
-	 * device_control() method to work.
-	 */
-	return NULL;
-}
-
-static int fsl_dma_device_control(struct dma_chan *dchan,
-				  enum dma_ctrl_cmd cmd, unsigned long arg)
-{
-	struct dma_slave_config *config;
 	struct fsldma_chan *chan;
-	int size;
 
 	if (!dchan)
 		return -EINVAL;
 
 	chan = to_fsl_chan(dchan);
 
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		spin_lock_bh(&chan->desc_lock);
-
-		/* Halt the DMA engine */
-		dma_halt(chan);
+	spin_lock_bh(&chan->desc_lock);
 
-		/* Remove and free all of the descriptors in the LD queue */
-		fsldma_free_desc_list(chan, &chan->ld_pending);
-		fsldma_free_desc_list(chan, &chan->ld_running);
-		fsldma_free_desc_list(chan, &chan->ld_completed);
-		chan->idle = true;
+	/* Halt the DMA engine */
+	dma_halt(chan);
 
-		spin_unlock_bh(&chan->desc_lock);
-		return 0;
+	/* Remove and free all of the descriptors in the LD queue */
+	fsldma_free_desc_list(chan, &chan->ld_pending);
+	fsldma_free_desc_list(chan, &chan->ld_running);
+	fsldma_free_desc_list(chan, &chan->ld_completed);
+	chan->idle = true;
 
-	case DMA_SLAVE_CONFIG:
-		config = (struct dma_slave_config *)arg;
+	spin_unlock_bh(&chan->desc_lock);
+	return 0;
+}
 
-		/* make sure the channel supports setting burst size */
-		if (!chan->set_request_count)
-			return -ENXIO;
+static int fsl_dma_device_config(struct dma_chan *dchan,
+				 struct dma_slave_config *config)
+{
+	struct fsldma_chan *chan;
+	int size;
 
-		/* we set the controller burst size depending on direction */
-		if (config->direction == DMA_MEM_TO_DEV)
-			size = config->dst_addr_width * config->dst_maxburst;
-		else
-			size = config->src_addr_width * config->src_maxburst;
+	if (!dchan)
+		return -EINVAL;
 
-		chan->set_request_count(chan, size);
-		return 0;
+	chan = to_fsl_chan(dchan);
 
-	default:
+	/* make sure the channel supports setting burst size */
+	if (!chan->set_request_count)
 		return -ENXIO;
-	}
 
+	/* we set the controller burst size depending on direction */
+	if (config->direction == DMA_MEM_TO_DEV)
+		size = config->dst_addr_width * config->dst_maxburst;
+	else
+		size = config->src_addr_width * config->src_maxburst;
+
+	chan->set_request_count(chan, size);
 	return 0;
 }
 
+
 /**
  * fsl_dma_memcpy_issue_pending - Issue the DMA start command
  * @chan : Freescale DMA channel
@@ -1395,10 +1367,15 @@ static int fsldma_of_probe(struct platform_device *op)
 	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
 	fdev->common.device_tx_status = fsl_tx_status;
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
-	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
-	fdev->common.device_control = fsl_dma_device_control;
+	fdev->common.device_config = fsl_dma_device_config;
+	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
 	fdev->common.dev = &op->dev;
 
+	fdev->common.src_addr_widths = FSL_DMA_BUSWIDTHS;
+	fdev->common.dst_addr_widths = FSL_DMA_BUSWIDTHS;
+	fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
 	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
 
 	platform_set_drvdata(op, fdev);
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 239c20c84382..31bffccdcc75 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -83,6 +83,10 @@
 #define FSL_DMA_DGSR_EOSI	0x02
 #define FSL_DMA_DGSR_EOLSI	0x01
 
+#define FSL_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+				BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+				BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+				BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
 typedef u64 __bitwise v64;
 typedef u32 __bitwise v32;
 
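FSL_DMA_BUSWIDTHS, like the other *_BUSWIDTHS masks in this series, is a bitmap indexed by enum dma_slave_buswidth, so checking whether a given width is supported reduces to a BIT() test. Sketch only:

static bool foo_width_supported(u32 supported_widths,
				enum dma_slave_buswidth width)
{
	return supported_widths & BIT(width);
}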
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
new file mode 100644
index 000000000000..ed045a9ad634
--- /dev/null
+++ b/drivers/dma/img-mdc-dma.c
@@ -0,0 +1,1011 @@
1/*
2 * IMG Multi-threaded DMA Controller (MDC)
3 *
4 * Copyright (C) 2009,2012,2013 Imagination Technologies Ltd.
5 * Copyright (C) 2014 Google, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 */
11
12#include <linux/clk.h>
13#include <linux/dma-mapping.h>
14#include <linux/dmaengine.h>
15#include <linux/dmapool.h>
16#include <linux/interrupt.h>
17#include <linux/io.h>
18#include <linux/irq.h>
19#include <linux/kernel.h>
20#include <linux/mfd/syscon.h>
21#include <linux/module.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/of_dma.h>
25#include <linux/platform_device.h>
26#include <linux/regmap.h>
27#include <linux/slab.h>
28#include <linux/spinlock.h>
29
30#include "dmaengine.h"
31#include "virt-dma.h"
32
33#define MDC_MAX_DMA_CHANNELS 32
34
35#define MDC_GENERAL_CONFIG 0x000
36#define MDC_GENERAL_CONFIG_LIST_IEN BIT(31)
37#define MDC_GENERAL_CONFIG_IEN BIT(29)
38#define MDC_GENERAL_CONFIG_LEVEL_INT BIT(28)
39#define MDC_GENERAL_CONFIG_INC_W BIT(12)
40#define MDC_GENERAL_CONFIG_INC_R BIT(8)
41#define MDC_GENERAL_CONFIG_PHYSICAL_W BIT(7)
42#define MDC_GENERAL_CONFIG_WIDTH_W_SHIFT 4
43#define MDC_GENERAL_CONFIG_WIDTH_W_MASK 0x7
44#define MDC_GENERAL_CONFIG_PHYSICAL_R BIT(3)
45#define MDC_GENERAL_CONFIG_WIDTH_R_SHIFT 0
46#define MDC_GENERAL_CONFIG_WIDTH_R_MASK 0x7
47
48#define MDC_READ_PORT_CONFIG 0x004
49#define MDC_READ_PORT_CONFIG_STHREAD_SHIFT 28
50#define MDC_READ_PORT_CONFIG_STHREAD_MASK 0xf
51#define MDC_READ_PORT_CONFIG_RTHREAD_SHIFT 24
52#define MDC_READ_PORT_CONFIG_RTHREAD_MASK 0xf
53#define MDC_READ_PORT_CONFIG_WTHREAD_SHIFT 16
54#define MDC_READ_PORT_CONFIG_WTHREAD_MASK 0xf
55#define MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT 4
56#define MDC_READ_PORT_CONFIG_BURST_SIZE_MASK 0xff
57#define MDC_READ_PORT_CONFIG_DREQ_ENABLE BIT(1)
58
59#define MDC_READ_ADDRESS 0x008
60
61#define MDC_WRITE_ADDRESS 0x00c
62
63#define MDC_TRANSFER_SIZE 0x010
64#define MDC_TRANSFER_SIZE_MASK 0xffffff
65
66#define MDC_LIST_NODE_ADDRESS 0x014
67
68#define MDC_CMDS_PROCESSED 0x018
69#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT 16
70#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK 0x3f
71#define MDC_CMDS_PROCESSED_INT_ACTIVE BIT(8)
72#define MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT 0
73#define MDC_CMDS_PROCESSED_CMDS_DONE_MASK 0x3f
74
75#define MDC_CONTROL_AND_STATUS 0x01c
76#define MDC_CONTROL_AND_STATUS_CANCEL BIT(20)
77#define MDC_CONTROL_AND_STATUS_LIST_EN BIT(4)
78#define MDC_CONTROL_AND_STATUS_EN BIT(0)
79
80#define MDC_ACTIVE_TRANSFER_SIZE 0x030
81
82#define MDC_GLOBAL_CONFIG_A 0x900
83#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT 16
84#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK 0xff
85#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT 8
86#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK 0xff
87#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT 0
88#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK 0xff
89
90struct mdc_hw_list_desc {
91 u32 gen_conf;
92 u32 readport_conf;
93 u32 read_addr;
94 u32 write_addr;
95 u32 xfer_size;
96 u32 node_addr;
97 u32 cmds_done;
98 u32 ctrl_status;
99 /*
100 * Not part of the list descriptor, but instead used by the CPU to
101 * traverse the list.
102 */
103 struct mdc_hw_list_desc *next_desc;
104};
105
106struct mdc_tx_desc {
107 struct mdc_chan *chan;
108 struct virt_dma_desc vd;
109 dma_addr_t list_phys;
110 struct mdc_hw_list_desc *list;
111 bool cyclic;
112 bool cmd_loaded;
113 unsigned int list_len;
114 unsigned int list_period_len;
115 size_t list_xfer_size;
116 unsigned int list_cmds_done;
117};
118
119struct mdc_chan {
120 struct mdc_dma *mdma;
121 struct virt_dma_chan vc;
122 struct dma_slave_config config;
123 struct mdc_tx_desc *desc;
124 int irq;
125 unsigned int periph;
126 unsigned int thread;
127 unsigned int chan_nr;
128};
129
130struct mdc_dma_soc_data {
131 void (*enable_chan)(struct mdc_chan *mchan);
132 void (*disable_chan)(struct mdc_chan *mchan);
133};
134
135struct mdc_dma {
136 struct dma_device dma_dev;
137 void __iomem *regs;
138 struct clk *clk;
139 struct dma_pool *desc_pool;
140 struct regmap *periph_regs;
141 spinlock_t lock;
142 unsigned int nr_threads;
143 unsigned int nr_channels;
144 unsigned int bus_width;
145 unsigned int max_burst_mult;
146 unsigned int max_xfer_size;
147 const struct mdc_dma_soc_data *soc;
148 struct mdc_chan channels[MDC_MAX_DMA_CHANNELS];
149};
150
151static inline u32 mdc_readl(struct mdc_dma *mdma, u32 reg)
152{
153 return readl(mdma->regs + reg);
154}
155
156static inline void mdc_writel(struct mdc_dma *mdma, u32 val, u32 reg)
157{
158 writel(val, mdma->regs + reg);
159}
160
161static inline u32 mdc_chan_readl(struct mdc_chan *mchan, u32 reg)
162{
163 return mdc_readl(mchan->mdma, mchan->chan_nr * 0x040 + reg);
164}
165
166static inline void mdc_chan_writel(struct mdc_chan *mchan, u32 val, u32 reg)
167{
168 mdc_writel(mchan->mdma, val, mchan->chan_nr * 0x040 + reg);
169}
170
171static inline struct mdc_chan *to_mdc_chan(struct dma_chan *c)
172{
173 return container_of(to_virt_chan(c), struct mdc_chan, vc);
174}
175
176static inline struct mdc_tx_desc *to_mdc_desc(struct dma_async_tx_descriptor *t)
177{
178 struct virt_dma_desc *vdesc = container_of(t, struct virt_dma_desc, tx);
179
180 return container_of(vdesc, struct mdc_tx_desc, vd);
181}
182
183static inline struct device *mdma2dev(struct mdc_dma *mdma)
184{
185 return mdma->dma_dev.dev;
186}
187
188static inline unsigned int to_mdc_width(unsigned int bytes)
189{
190 return ffs(bytes) - 1;
191}
192
193static inline void mdc_set_read_width(struct mdc_hw_list_desc *ldesc,
194 unsigned int bytes)
195{
196 ldesc->gen_conf |= to_mdc_width(bytes) <<
197 MDC_GENERAL_CONFIG_WIDTH_R_SHIFT;
198}
199
200static inline void mdc_set_write_width(struct mdc_hw_list_desc *ldesc,
201 unsigned int bytes)
202{
203 ldesc->gen_conf |= to_mdc_width(bytes) <<
204 MDC_GENERAL_CONFIG_WIDTH_W_SHIFT;
205}
206
207static void mdc_list_desc_config(struct mdc_chan *mchan,
208 struct mdc_hw_list_desc *ldesc,
209 enum dma_transfer_direction dir,
210 dma_addr_t src, dma_addr_t dst, size_t len)
211{
212 struct mdc_dma *mdma = mchan->mdma;
213 unsigned int max_burst, burst_size;
214
215 ldesc->gen_conf = MDC_GENERAL_CONFIG_IEN | MDC_GENERAL_CONFIG_LIST_IEN |
216 MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
217 MDC_GENERAL_CONFIG_PHYSICAL_R;
218 ldesc->readport_conf =
219 (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
220 (mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
221 (mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
222 ldesc->read_addr = src;
223 ldesc->write_addr = dst;
224 ldesc->xfer_size = len - 1;
225 ldesc->node_addr = 0;
226 ldesc->cmds_done = 0;
227 ldesc->ctrl_status = MDC_CONTROL_AND_STATUS_LIST_EN |
228 MDC_CONTROL_AND_STATUS_EN;
229 ldesc->next_desc = NULL;
230
231 if (IS_ALIGNED(dst, mdma->bus_width) &&
232 IS_ALIGNED(src, mdma->bus_width))
233 max_burst = mdma->bus_width * mdma->max_burst_mult;
234 else
235 max_burst = mdma->bus_width * (mdma->max_burst_mult - 1);
236
237 if (dir == DMA_MEM_TO_DEV) {
238 ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R;
239 ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
240 mdc_set_read_width(ldesc, mdma->bus_width);
241 mdc_set_write_width(ldesc, mchan->config.dst_addr_width);
242 burst_size = min(max_burst, mchan->config.dst_maxburst *
243 mchan->config.dst_addr_width);
244 } else if (dir == DMA_DEV_TO_MEM) {
245 ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_W;
246 ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
247 mdc_set_read_width(ldesc, mchan->config.src_addr_width);
248 mdc_set_write_width(ldesc, mdma->bus_width);
249 burst_size = min(max_burst, mchan->config.src_maxburst *
250 mchan->config.src_addr_width);
251 } else {
252 ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R |
253 MDC_GENERAL_CONFIG_INC_W;
254 mdc_set_read_width(ldesc, mdma->bus_width);
255 mdc_set_write_width(ldesc, mdma->bus_width);
256 burst_size = max_burst;
257 }
258 ldesc->readport_conf |= (burst_size - 1) <<
259 MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT;
260}
261
262static void mdc_list_desc_free(struct mdc_tx_desc *mdesc)
263{
264 struct mdc_dma *mdma = mdesc->chan->mdma;
265 struct mdc_hw_list_desc *curr, *next;
266 dma_addr_t curr_phys, next_phys;
267
268 curr = mdesc->list;
269 curr_phys = mdesc->list_phys;
270 while (curr) {
271 next = curr->next_desc;
272 next_phys = curr->node_addr;
273 dma_pool_free(mdma->desc_pool, curr, curr_phys);
274 curr = next;
275 curr_phys = next_phys;
276 }
277}
278
279static void mdc_desc_free(struct virt_dma_desc *vd)
280{
281 struct mdc_tx_desc *mdesc = to_mdc_desc(&vd->tx);
282
283 mdc_list_desc_free(mdesc);
284 kfree(mdesc);
285}
286
287static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
288 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len,
289 unsigned long flags)
290{
291 struct mdc_chan *mchan = to_mdc_chan(chan);
292 struct mdc_dma *mdma = mchan->mdma;
293 struct mdc_tx_desc *mdesc;
294 struct mdc_hw_list_desc *curr, *prev = NULL;
295 dma_addr_t curr_phys, prev_phys;
296
297 if (!len)
298 return NULL;
299
300 mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
301 if (!mdesc)
302 return NULL;
303 mdesc->chan = mchan;
304 mdesc->list_xfer_size = len;
305
306 while (len > 0) {
307 size_t xfer_size;
308
309 curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, &curr_phys);
310 if (!curr)
311 goto free_desc;
312
313 if (prev) {
314 prev->node_addr = curr_phys;
315 prev->next_desc = curr;
316 } else {
317 mdesc->list_phys = curr_phys;
318 mdesc->list = curr;
319 }
320
321 xfer_size = min_t(size_t, mdma->max_xfer_size, len);
322
323 mdc_list_desc_config(mchan, curr, DMA_MEM_TO_MEM, src, dest,
324 xfer_size);
325
326 prev = curr;
327 prev_phys = curr_phys;
328
329 mdesc->list_len++;
330 src += xfer_size;
331 dest += xfer_size;
332 len -= xfer_size;
333 }
334
335 return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);
336
337free_desc:
338 mdc_desc_free(&mdesc->vd);
339
340 return NULL;
341}
342
343static int mdc_check_slave_width(struct mdc_chan *mchan,
344 enum dma_transfer_direction dir)
345{
346 enum dma_slave_buswidth width;
347
348 if (dir == DMA_MEM_TO_DEV)
349 width = mchan->config.dst_addr_width;
350 else
351 width = mchan->config.src_addr_width;
352
353 switch (width) {
354 case DMA_SLAVE_BUSWIDTH_1_BYTE:
355 case DMA_SLAVE_BUSWIDTH_2_BYTES:
356 case DMA_SLAVE_BUSWIDTH_4_BYTES:
357 case DMA_SLAVE_BUSWIDTH_8_BYTES:
358 break;
359 default:
360 return -EINVAL;
361 }
362
363 if (width > mchan->mdma->bus_width)
364 return -EINVAL;
365
366 return 0;
367}
368
369static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
370 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
371 size_t period_len, enum dma_transfer_direction dir,
372 unsigned long flags)
373{
374 struct mdc_chan *mchan = to_mdc_chan(chan);
375 struct mdc_dma *mdma = mchan->mdma;
376 struct mdc_tx_desc *mdesc;
377 struct mdc_hw_list_desc *curr, *prev = NULL;
378 dma_addr_t curr_phys, prev_phys;
379
380 if (!buf_len && !period_len)
381 return NULL;
382
383 if (!is_slave_direction(dir))
384 return NULL;
385
386 if (mdc_check_slave_width(mchan, dir) < 0)
387 return NULL;
388
389 mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
390 if (!mdesc)
391 return NULL;
392 mdesc->chan = mchan;
393 mdesc->cyclic = true;
394 mdesc->list_xfer_size = buf_len;
395 mdesc->list_period_len = DIV_ROUND_UP(period_len,
396 mdma->max_xfer_size);
397
398 while (buf_len > 0) {
399 size_t remainder = min(period_len, buf_len);
400
401 while (remainder > 0) {
402 size_t xfer_size;
403
404 curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
405 &curr_phys);
406 if (!curr)
407 goto free_desc;
408
409 if (!prev) {
410 mdesc->list_phys = curr_phys;
411 mdesc->list = curr;
412 } else {
413 prev->node_addr = curr_phys;
414 prev->next_desc = curr;
415 }
416
417 xfer_size = min_t(size_t, mdma->max_xfer_size,
418 remainder);
419
420 if (dir == DMA_MEM_TO_DEV) {
421 mdc_list_desc_config(mchan, curr, dir,
422 buf_addr,
423 mchan->config.dst_addr,
424 xfer_size);
425 } else {
426 mdc_list_desc_config(mchan, curr, dir,
427 mchan->config.src_addr,
428 buf_addr,
429 xfer_size);
430 }
431
432 prev = curr;
433 prev_phys = curr_phys;
434
435 mdesc->list_len++;
436 buf_addr += xfer_size;
437 buf_len -= xfer_size;
438 remainder -= xfer_size;
439 }
440 }
441 prev->node_addr = mdesc->list_phys;
442
443 return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);
444
445free_desc:
446 mdc_desc_free(&mdesc->vd);
447
448 return NULL;
449}
450
451static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
452 struct dma_chan *chan, struct scatterlist *sgl,
453 unsigned int sg_len, enum dma_transfer_direction dir,
454 unsigned long flags, void *context)
455{
456 struct mdc_chan *mchan = to_mdc_chan(chan);
457 struct mdc_dma *mdma = mchan->mdma;
458 struct mdc_tx_desc *mdesc;
459 struct scatterlist *sg;
460 struct mdc_hw_list_desc *curr, *prev = NULL;
461 dma_addr_t curr_phys, prev_phys;
462 unsigned int i;
463
464 if (!sgl)
465 return NULL;
466
467 if (!is_slave_direction(dir))
468 return NULL;
469
470 if (mdc_check_slave_width(mchan, dir) < 0)
471 return NULL;
472
473 mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
474 if (!mdesc)
475 return NULL;
476 mdesc->chan = mchan;
477
478 for_each_sg(sgl, sg, sg_len, i) {
479 dma_addr_t buf = sg_dma_address(sg);
480 size_t buf_len = sg_dma_len(sg);
481
482 while (buf_len > 0) {
483 size_t xfer_size;
484
485 curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
486 &curr_phys);
487 if (!curr)
488 goto free_desc;
489
490 if (!prev) {
491 mdesc->list_phys = curr_phys;
492 mdesc->list = curr;
493 } else {
494 prev->node_addr = curr_phys;
495 prev->next_desc = curr;
496 }
497
498 xfer_size = min_t(size_t, mdma->max_xfer_size,
499 buf_len);
500
501 if (dir == DMA_MEM_TO_DEV) {
502 mdc_list_desc_config(mchan, curr, dir, buf,
503 mchan->config.dst_addr,
504 xfer_size);
505 } else {
506 mdc_list_desc_config(mchan, curr, dir,
507 mchan->config.src_addr,
508 buf, xfer_size);
509 }
510
511 prev = curr;
512 prev_phys = curr_phys;
513
514 mdesc->list_len++;
515 mdesc->list_xfer_size += xfer_size;
516 buf += xfer_size;
517 buf_len -= xfer_size;
518 }
519 }
520
521 return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);
522
523free_desc:
524 mdc_desc_free(&mdesc->vd);
525
526 return NULL;
527}
528
529static void mdc_issue_desc(struct mdc_chan *mchan)
530{
531 struct mdc_dma *mdma = mchan->mdma;
532 struct virt_dma_desc *vd;
533 struct mdc_tx_desc *mdesc;
534 u32 val;
535
536 vd = vchan_next_desc(&mchan->vc);
537 if (!vd)
538 return;
539
540 list_del(&vd->node);
541
542 mdesc = to_mdc_desc(&vd->tx);
543 mchan->desc = mdesc;
544
545 dev_dbg(mdma2dev(mdma), "Issuing descriptor on channel %d\n",
546 mchan->chan_nr);
547
548 mdma->soc->enable_chan(mchan);
549
550 val = mdc_chan_readl(mchan, MDC_GENERAL_CONFIG);
551 val |= MDC_GENERAL_CONFIG_LIST_IEN | MDC_GENERAL_CONFIG_IEN |
552 MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
553 MDC_GENERAL_CONFIG_PHYSICAL_R;
554 mdc_chan_writel(mchan, val, MDC_GENERAL_CONFIG);
555 val = (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
556 (mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
557 (mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
558 mdc_chan_writel(mchan, val, MDC_READ_PORT_CONFIG);
559 mdc_chan_writel(mchan, mdesc->list_phys, MDC_LIST_NODE_ADDRESS);
560 val = mdc_chan_readl(mchan, MDC_CONTROL_AND_STATUS);
561 val |= MDC_CONTROL_AND_STATUS_LIST_EN;
562 mdc_chan_writel(mchan, val, MDC_CONTROL_AND_STATUS);
563}
564
565static void mdc_issue_pending(struct dma_chan *chan)
566{
567 struct mdc_chan *mchan = to_mdc_chan(chan);
568 unsigned long flags;
569
570 spin_lock_irqsave(&mchan->vc.lock, flags);
571 if (vchan_issue_pending(&mchan->vc) && !mchan->desc)
572 mdc_issue_desc(mchan);
573 spin_unlock_irqrestore(&mchan->vc.lock, flags);
574}
575
576static enum dma_status mdc_tx_status(struct dma_chan *chan,
577 dma_cookie_t cookie, struct dma_tx_state *txstate)
578{
579 struct mdc_chan *mchan = to_mdc_chan(chan);
580 struct mdc_tx_desc *mdesc;
581 struct virt_dma_desc *vd;
582 unsigned long flags;
583 size_t bytes = 0;
584 int ret;
585
586 ret = dma_cookie_status(chan, cookie, txstate);
587 if (ret == DMA_COMPLETE)
588 return ret;
589
590 if (!txstate)
591 return ret;
592
593 spin_lock_irqsave(&mchan->vc.lock, flags);
594 vd = vchan_find_desc(&mchan->vc, cookie);
595 if (vd) {
596 mdesc = to_mdc_desc(&vd->tx);
597 bytes = mdesc->list_xfer_size;
598 } else if (mchan->desc && mchan->desc->vd.tx.cookie == cookie) {
599 struct mdc_hw_list_desc *ldesc;
600 u32 val1, val2, done, processed, residue;
601 int i, cmds;
602
603 mdesc = mchan->desc;
604
605 /*
606 * Determine the number of commands that haven't been
607 * processed (handled by the IRQ handler) yet.
608 */
609 do {
610 val1 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
611 ~MDC_CMDS_PROCESSED_INT_ACTIVE;
612 residue = mdc_chan_readl(mchan,
613 MDC_ACTIVE_TRANSFER_SIZE);
614 val2 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
615 ~MDC_CMDS_PROCESSED_INT_ACTIVE;
616 } while (val1 != val2);
617
618 done = (val1 >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
619 MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
620 processed = (val1 >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
621 MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
622 cmds = (done - processed) %
623 (MDC_CMDS_PROCESSED_CMDS_DONE_MASK + 1);
624
625 /*
626 * If the command loaded event hasn't been processed yet, then
627 * the difference above includes an extra command.
628 */
629 if (!mdesc->cmd_loaded)
630 cmds--;
631 else
632 cmds += mdesc->list_cmds_done;
633
634 bytes = mdesc->list_xfer_size;
635 ldesc = mdesc->list;
636 for (i = 0; i < cmds; i++) {
637 bytes -= ldesc->xfer_size + 1;
638 ldesc = ldesc->next_desc;
639 }
640 if (ldesc) {
641 if (residue != MDC_TRANSFER_SIZE_MASK)
642 bytes -= ldesc->xfer_size - residue;
643 else
644 bytes -= ldesc->xfer_size + 1;
645 }
646 }
647 spin_unlock_irqrestore(&mchan->vc.lock, flags);
648
649 dma_set_residue(txstate, bytes);
650
651 return ret;
652}
653
654static int mdc_terminate_all(struct dma_chan *chan)
655{
656 struct mdc_chan *mchan = to_mdc_chan(chan);
657 struct mdc_tx_desc *mdesc;
658 unsigned long flags;
659 LIST_HEAD(head);
660
661 spin_lock_irqsave(&mchan->vc.lock, flags);
662
663 mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
664 MDC_CONTROL_AND_STATUS);
665
666 mdesc = mchan->desc;
667 mchan->desc = NULL;
668 vchan_get_all_descriptors(&mchan->vc, &head);
669
670 spin_unlock_irqrestore(&mchan->vc.lock, flags);
671
672 if (mdesc)
673 mdc_desc_free(&mdesc->vd);
674 vchan_dma_desc_free_list(&mchan->vc, &head);
675
676 return 0;
677}
678
679static int mdc_slave_config(struct dma_chan *chan,
680 struct dma_slave_config *config)
681{
682 struct mdc_chan *mchan = to_mdc_chan(chan);
683 unsigned long flags;
684
685 spin_lock_irqsave(&mchan->vc.lock, flags);
686 mchan->config = *config;
687 spin_unlock_irqrestore(&mchan->vc.lock, flags);
688
689 return 0;
690}
691
692static int mdc_alloc_chan_resources(struct dma_chan *chan)
693{
694 return 0;
695}
696
697static void mdc_free_chan_resources(struct dma_chan *chan)
698{
699 struct mdc_chan *mchan = to_mdc_chan(chan);
700 struct mdc_dma *mdma = mchan->mdma;
701
702 mdc_terminate_all(chan);
703
704 mdma->soc->disable_chan(mchan);
705}
706
707static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
708{
709 struct mdc_chan *mchan = (struct mdc_chan *)dev_id;
710 struct mdc_tx_desc *mdesc;
711 u32 val, processed, done1, done2;
712 unsigned int i;
713
714 spin_lock(&mchan->vc.lock);
715
716 val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
717 processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
718 MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
719 /*
720 * CMDS_DONE may have incremented between reading CMDS_PROCESSED
721 * and clearing INT_ACTIVE. Re-read CMDS_PROCESSED to ensure we
722 * didn't miss a command completion.
723 */
724 do {
725 val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
726 done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
727 MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
728 val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK <<
729 MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) |
730 MDC_CMDS_PROCESSED_INT_ACTIVE);
731 val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT;
732 mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED);
733 val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
734 done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
735 MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
736 } while (done1 != done2);
737
738 dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr);
739
740 mdesc = mchan->desc;
741 if (!mdesc) {
742 dev_warn(mdma2dev(mchan->mdma),
743 "IRQ with no active descriptor on channel %d\n",
744 mchan->chan_nr);
745 goto out;
746 }
747
748 for (i = processed; i != done1;
749 i = (i + 1) % (MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1)) {
750 /*
751 * The first interrupt in a transfer indicates that the
752 * command list has been loaded, not that a command has
753 * been completed.
754 */
755 if (!mdesc->cmd_loaded) {
756 mdesc->cmd_loaded = true;
757 continue;
758 }
759
760 mdesc->list_cmds_done++;
761 if (mdesc->cyclic) {
762 mdesc->list_cmds_done %= mdesc->list_len;
763 if (mdesc->list_cmds_done % mdesc->list_period_len == 0)
764 vchan_cyclic_callback(&mdesc->vd);
765 } else if (mdesc->list_cmds_done == mdesc->list_len) {
766 mchan->desc = NULL;
767 vchan_cookie_complete(&mdesc->vd);
768 mdc_issue_desc(mchan);
769 break;
770 }
771 }
772out:
773 spin_unlock(&mchan->vc.lock);
774
775 return IRQ_HANDLED;
776}
777
778static struct dma_chan *mdc_of_xlate(struct of_phandle_args *dma_spec,
779 struct of_dma *ofdma)
780{
781 struct mdc_dma *mdma = ofdma->of_dma_data;
782 struct dma_chan *chan;
783
784 if (dma_spec->args_count != 3)
785 return NULL;
786
787 list_for_each_entry(chan, &mdma->dma_dev.channels, device_node) {
788 struct mdc_chan *mchan = to_mdc_chan(chan);
789
790 if (!(dma_spec->args[1] & BIT(mchan->chan_nr)))
791 continue;
792 if (dma_get_slave_channel(chan)) {
793 mchan->periph = dma_spec->args[0];
794 mchan->thread = dma_spec->args[2];
795 return chan;
796 }
797 }
798
799 return NULL;
800}
801
802#define PISTACHIO_CR_PERIPH_DMA_ROUTE(ch) (0x120 + 0x4 * ((ch) / 4))
803#define PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(ch) (8 * ((ch) % 4))
804#define PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK 0x3f
805
806static void pistachio_mdc_enable_chan(struct mdc_chan *mchan)
807{
808 struct mdc_dma *mdma = mchan->mdma;
809
810 regmap_update_bits(mdma->periph_regs,
811 PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
812 PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
813 PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
814 mchan->periph <<
815 PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr));
816}
817
818static void pistachio_mdc_disable_chan(struct mdc_chan *mchan)
819{
820 struct mdc_dma *mdma = mchan->mdma;
821
822 regmap_update_bits(mdma->periph_regs,
823 PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
824 PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
825 PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
826 0);
827}
828
829static const struct mdc_dma_soc_data pistachio_mdc_data = {
830 .enable_chan = pistachio_mdc_enable_chan,
831 .disable_chan = pistachio_mdc_disable_chan,
832};
833
834static const struct of_device_id mdc_dma_of_match[] = {
835 { .compatible = "img,pistachio-mdc-dma", .data = &pistachio_mdc_data, },
836 { },
837};
838MODULE_DEVICE_TABLE(of, mdc_dma_of_match);
839
840static int mdc_dma_probe(struct platform_device *pdev)
841{
842 struct mdc_dma *mdma;
843 struct resource *res;
844 const struct of_device_id *match;
845 unsigned int i;
846 u32 val;
847 int ret;
848
849 mdma = devm_kzalloc(&pdev->dev, sizeof(*mdma), GFP_KERNEL);
850 if (!mdma)
851 return -ENOMEM;
852 platform_set_drvdata(pdev, mdma);
853
854 match = of_match_device(mdc_dma_of_match, &pdev->dev);
855 mdma->soc = match->data;
856
857 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
858 mdma->regs = devm_ioremap_resource(&pdev->dev, res);
859 if (IS_ERR(mdma->regs))
860 return PTR_ERR(mdma->regs);
861
862 mdma->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
863 "img,cr-periph");
864 if (IS_ERR(mdma->periph_regs))
865 return PTR_ERR(mdma->periph_regs);
866
867 mdma->clk = devm_clk_get(&pdev->dev, "sys");
868 if (IS_ERR(mdma->clk))
869 return PTR_ERR(mdma->clk);
870
871 ret = clk_prepare_enable(mdma->clk);
872 if (ret)
873 return ret;
874
875 dma_cap_zero(mdma->dma_dev.cap_mask);
876 dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask);
877 dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask);
878 dma_cap_set(DMA_CYCLIC, mdma->dma_dev.cap_mask);
879 dma_cap_set(DMA_MEMCPY, mdma->dma_dev.cap_mask);
880
881 val = mdc_readl(mdma, MDC_GLOBAL_CONFIG_A);
882 mdma->nr_channels = (val >> MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT) &
883 MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK;
884 mdma->nr_threads =
885 1 << ((val >> MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT) &
886 MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK);
887 mdma->bus_width =
888 (1 << ((val >> MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT) &
889 MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK)) / 8;
890 /*
891 * Although transfer sizes of up to MDC_TRANSFER_SIZE_MASK + 1 bytes
892 * are supported, this makes it possible for the value reported in
893 * MDC_ACTIVE_TRANSFER_SIZE to be ambiguous - an active transfer size
894 * of MDC_TRANSFER_SIZE_MASK may indicate either that 0 bytes or
895 * MDC_TRANSFER_SIZE_MASK + 1 bytes are remaining. To eliminate this
896 * ambiguity, restrict transfer sizes to one bus-width less than the
897 * actual maximum.
898 */
899 mdma->max_xfer_size = MDC_TRANSFER_SIZE_MASK + 1 - mdma->bus_width;
900
901 of_property_read_u32(pdev->dev.of_node, "dma-channels",
902 &mdma->nr_channels);
903 ret = of_property_read_u32(pdev->dev.of_node,
904 "img,max-burst-multiplier",
905 &mdma->max_burst_mult);
906 if (ret)
907 goto disable_clk;
908
909 mdma->dma_dev.dev = &pdev->dev;
910 mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg;
911 mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
912 mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy;
913 mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources;
914 mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources;
915 mdma->dma_dev.device_tx_status = mdc_tx_status;
916 mdma->dma_dev.device_issue_pending = mdc_issue_pending;
917 mdma->dma_dev.device_terminate_all = mdc_terminate_all;
918 mdma->dma_dev.device_config = mdc_slave_config;
919
920 mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
921 mdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
922 for (i = 1; i <= mdma->bus_width; i <<= 1) {
923 mdma->dma_dev.src_addr_widths |= BIT(i);
924 mdma->dma_dev.dst_addr_widths |= BIT(i);
925 }
926
927 INIT_LIST_HEAD(&mdma->dma_dev.channels);
928 for (i = 0; i < mdma->nr_channels; i++) {
929 struct mdc_chan *mchan = &mdma->channels[i];
930
931 mchan->mdma = mdma;
932 mchan->chan_nr = i;
933 mchan->irq = platform_get_irq(pdev, i);
934 if (mchan->irq < 0) {
935 ret = mchan->irq;
936 goto disable_clk;
937 }
938 ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq,
939 IRQ_TYPE_LEVEL_HIGH,
940 dev_name(&pdev->dev), mchan);
941 if (ret < 0)
942 goto disable_clk;
943
944 mchan->vc.desc_free = mdc_desc_free;
945 vchan_init(&mchan->vc, &mdma->dma_dev);
946 }
947
948 mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
949 sizeof(struct mdc_hw_list_desc),
950 4, 0);
951 if (!mdma->desc_pool) {
952 ret = -ENOMEM;
953 goto disable_clk;
954 }
955
956 ret = dma_async_device_register(&mdma->dma_dev);
957 if (ret)
958 goto disable_clk;
959
960 ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma);
961 if (ret)
962 goto unregister;
963
964 dev_info(&pdev->dev, "MDC with %u channels and %u threads\n",
965 mdma->nr_channels, mdma->nr_threads);
966
967 return 0;
968
969unregister:
970 dma_async_device_unregister(&mdma->dma_dev);
971disable_clk:
972 clk_disable_unprepare(mdma->clk);
973 return ret;
974}
975
976static int mdc_dma_remove(struct platform_device *pdev)
977{
978 struct mdc_dma *mdma = platform_get_drvdata(pdev);
979 struct mdc_chan *mchan, *next;
980
981 of_dma_controller_free(pdev->dev.of_node);
982 dma_async_device_unregister(&mdma->dma_dev);
983
984 list_for_each_entry_safe(mchan, next, &mdma->dma_dev.channels,
985 vc.chan.device_node) {
986 list_del(&mchan->vc.chan.device_node);
987
988 synchronize_irq(mchan->irq);
989 devm_free_irq(&pdev->dev, mchan->irq, mchan);
990
991 tasklet_kill(&mchan->vc.task);
992 }
993
994 clk_disable_unprepare(mdma->clk);
995
996 return 0;
997}
998
999static struct platform_driver mdc_dma_driver = {
1000 .driver = {
1001 .name = "img-mdc-dma",
1002 .of_match_table = of_match_ptr(mdc_dma_of_match),
1003 },
1004 .probe = mdc_dma_probe,
1005 .remove = mdc_dma_remove,
1006};
1007module_platform_driver(mdc_dma_driver);
1008
1009MODULE_DESCRIPTION("IMG Multi-threaded DMA Controller (MDC) driver");
1010MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
1011MODULE_LICENSE("GPL v2");
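The new MDC driver's prep routines all share one loop: allocate hardware list nodes from a dma_pool and chain them twice, once physically through node_addr for the controller and once virtually through next_desc for the CPU. A reduced sketch of that chaining step, with hypothetical foo_* types:

struct foo_hw_desc {
	u32 node_addr;			/* physical address of the next node */
	/* other hardware fields elided */
	struct foo_hw_desc *next;	/* CPU-side mirror of the chain */
};

static struct foo_hw_desc *foo_chain_node(struct dma_pool *pool,
					  struct foo_hw_desc *prev,
					  dma_addr_t *phys)
{
	struct foo_hw_desc *curr = dma_pool_alloc(pool, GFP_NOWAIT, phys);

	if (curr && prev) {
		prev->node_addr = *phys;	/* hardware follows this link */
		prev->next = curr;		/* the driver walks this one */
	}
	return curr;
}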
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 10bbc0a675b0..eed405976ea9 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -230,11 +230,6 @@ static inline int is_imx1_dma(struct imxdma_engine *imxdma)
 	return imxdma->devtype == IMX1_DMA;
 }
 
-static inline int is_imx21_dma(struct imxdma_engine *imxdma)
-{
-	return imxdma->devtype == IMX21_DMA;
-}
-
 static inline int is_imx27_dma(struct imxdma_engine *imxdma)
 {
 	return imxdma->devtype == IMX27_DMA;
@@ -669,69 +664,67 @@ out:
 
 }
 
-static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		unsigned long arg)
+static int imxdma_terminate_all(struct dma_chan *chan)
 {
 	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
-	struct dma_slave_config *dmaengine_cfg = (void *)arg;
 	struct imxdma_engine *imxdma = imxdmac->imxdma;
 	unsigned long flags;
-	unsigned int mode = 0;
-
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		imxdma_disable_hw(imxdmac);
 
-		spin_lock_irqsave(&imxdma->lock, flags);
-		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
-		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
-		spin_unlock_irqrestore(&imxdma->lock, flags);
-		return 0;
-	case DMA_SLAVE_CONFIG:
-		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
-			imxdmac->per_address = dmaengine_cfg->src_addr;
-			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
-			imxdmac->word_size = dmaengine_cfg->src_addr_width;
-		} else {
-			imxdmac->per_address = dmaengine_cfg->dst_addr;
-			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
-			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
-		}
+	imxdma_disable_hw(imxdmac);
 
-		switch (imxdmac->word_size) {
-		case DMA_SLAVE_BUSWIDTH_1_BYTE:
-			mode = IMX_DMA_MEMSIZE_8;
-			break;
-		case DMA_SLAVE_BUSWIDTH_2_BYTES:
-			mode = IMX_DMA_MEMSIZE_16;
-			break;
-		default:
-		case DMA_SLAVE_BUSWIDTH_4_BYTES:
-			mode = IMX_DMA_MEMSIZE_32;
-			break;
-		}
+	spin_lock_irqsave(&imxdma->lock, flags);
+	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
+	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
+	spin_unlock_irqrestore(&imxdma->lock, flags);
+	return 0;
+}
 
-		imxdmac->hw_chaining = 0;
+static int imxdma_config(struct dma_chan *chan,
+			 struct dma_slave_config *dmaengine_cfg)
+{
+	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+	struct imxdma_engine *imxdma = imxdmac->imxdma;
+	unsigned int mode = 0;
 
-		imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
-			((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
-			CCR_REN;
-		imxdmac->ccr_to_device =
-			(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
-			((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
-		imx_dmav1_writel(imxdma, imxdmac->dma_request,
-				 DMA_RSSR(imxdmac->channel));
+	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+		imxdmac->per_address = dmaengine_cfg->src_addr;
+		imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
+		imxdmac->word_size = dmaengine_cfg->src_addr_width;
+	} else {
+		imxdmac->per_address = dmaengine_cfg->dst_addr;
+		imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
+		imxdmac->word_size = dmaengine_cfg->dst_addr_width;
+	}
 
-		/* Set burst length */
-		imx_dmav1_writel(imxdma, imxdmac->watermark_level *
-				imxdmac->word_size, DMA_BLR(imxdmac->channel));
+	switch (imxdmac->word_size) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		mode = IMX_DMA_MEMSIZE_8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		mode = IMX_DMA_MEMSIZE_16;
+		break;
+	default:
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		mode = IMX_DMA_MEMSIZE_32;
+		break;
+	}
 
-		return 0;
-	default:
-		return -ENOSYS;
-	}
+	imxdmac->hw_chaining = 0;
 
-	return -EINVAL;
+	imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
+		((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
+		CCR_REN;
+	imxdmac->ccr_to_device =
+		(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
+		((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
+	imx_dmav1_writel(imxdma, imxdmac->dma_request,
+			 DMA_RSSR(imxdmac->channel));
+
+	/* Set burst length */
+	imx_dmav1_writel(imxdma, imxdmac->watermark_level *
+			 imxdmac->word_size, DMA_BLR(imxdmac->channel));
+
+	return 0;
 }
 
 static enum dma_status imxdma_tx_status(struct dma_chan *chan,
@@ -1184,7 +1177,8 @@ static int __init imxdma_probe(struct platform_device *pdev)
 	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
 	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
 	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
-	imxdma->dma_device.device_control = imxdma_control;
+	imxdma->dma_device.device_config = imxdma_config;
+	imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
 	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;
 
 	platform_set_drvdata(pdev, imxdma);
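None of these conversions changes the client side: consumers keep using the dmaengine_slave_config() and dmaengine_terminate_all() wrappers, which now land in device_config and device_terminate_all instead of device_control. Illustrative usage, with fifo_phys standing in for a real device FIFO address:

struct dma_slave_config cfg = {
	.direction	= DMA_MEM_TO_DEV,
	.dst_addr	= fifo_phys,
	.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	.dst_maxburst	= 16,
};

ret = dmaengine_slave_config(chan, &cfg);
if (ret)
	return ret;
/* ... prepare, submit and issue descriptors ... */
dmaengine_terminate_all(chan);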
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d0df198f62e9..18c0a131e4e4 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -830,20 +830,29 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	return ret;
 }
 
-static void sdma_disable_channel(struct sdma_channel *sdmac)
+static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct sdma_channel, chan);
+}
+
+static int sdma_disable_channel(struct dma_chan *chan)
 {
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int channel = sdmac->channel;
 
 	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
 	sdmac->status = DMA_ERROR;
+
+	return 0;
 }
 
-static int sdma_config_channel(struct sdma_channel *sdmac)
+static int sdma_config_channel(struct dma_chan *chan)
 {
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	int ret;
 
-	sdma_disable_channel(sdmac);
+	sdma_disable_channel(chan);
 
 	sdmac->event_mask[0] = 0;
 	sdmac->event_mask[1] = 0;
@@ -935,11 +944,6 @@ out:
 	return ret;
 }
 
-static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
-{
-	return container_of(chan, struct sdma_channel, chan);
-}
-
 static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	unsigned long flags;
@@ -1004,7 +1008,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 
-	sdma_disable_channel(sdmac);
+	sdma_disable_channel(chan);
 
 	if (sdmac->event_id0)
 		sdma_event_disable(sdmac, sdmac->event_id0);
@@ -1203,35 +1207,24 @@ err_out:
 	return NULL;
 }
 
-static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		unsigned long arg)
+static int sdma_config(struct dma_chan *chan,
+		       struct dma_slave_config *dmaengine_cfg)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
-	struct dma_slave_config *dmaengine_cfg = (void *)arg;
-
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		sdma_disable_channel(sdmac);
-		return 0;
-	case DMA_SLAVE_CONFIG:
-		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
-			sdmac->per_address = dmaengine_cfg->src_addr;
-			sdmac->watermark_level = dmaengine_cfg->src_maxburst *
-						dmaengine_cfg->src_addr_width;
-			sdmac->word_size = dmaengine_cfg->src_addr_width;
-		} else {
-			sdmac->per_address = dmaengine_cfg->dst_addr;
-			sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
-						dmaengine_cfg->dst_addr_width;
-			sdmac->word_size = dmaengine_cfg->dst_addr_width;
-		}
-		sdmac->direction = dmaengine_cfg->direction;
-		return sdma_config_channel(sdmac);
-	default:
-		return -ENOSYS;
-	}
 
-	return -EINVAL;
+	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+		sdmac->per_address = dmaengine_cfg->src_addr;
+		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
+			dmaengine_cfg->src_addr_width;
+		sdmac->word_size = dmaengine_cfg->src_addr_width;
+	} else {
+		sdmac->per_address = dmaengine_cfg->dst_addr;
+		sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
+			dmaengine_cfg->dst_addr_width;
+		sdmac->word_size = dmaengine_cfg->dst_addr_width;
+	}
+	sdmac->direction = dmaengine_cfg->direction;
+	return sdma_config_channel(chan);
 }
 
 static enum dma_status sdma_tx_status(struct dma_chan *chan,
@@ -1303,15 +1296,15 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
 	if (header->ram_code_start + header->ram_code_size > fw->size)
 		goto err_firmware;
 	switch (header->version_major) {
-	case 1:
-		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
-		break;
-	case 2:
-		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
-		break;
-	default:
-		dev_err(sdma->dev, "unknown firmware version\n");
-		goto err_firmware;
-	}
+	case 1:
+		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+		break;
+	case 2:
+		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
+		break;
+	default:
+		dev_err(sdma->dev, "unknown firmware version\n");
+		goto err_firmware;
+	}
 
 	addr = (void *)header + header->script_addrs_start;
@@ -1479,7 +1472,7 @@ static int sdma_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
+	sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
 	if (!sdma)
 		return -ENOMEM;
 
@@ -1488,48 +1481,34 @@ static int sdma_probe(struct platform_device *pdev)
 	sdma->dev = &pdev->dev;
 	sdma->drvdata = drvdata;
 
-	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_irq(pdev, 0);
-	if (!iores || irq < 0) {
-		ret = -EINVAL;
-		goto err_irq;
-	}
+	if (irq < 0)
+		return irq;
 
-	if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) {
-		ret = -EBUSY;
-		goto err_request_region;
-	}
+	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
+	if (IS_ERR(sdma->regs))
+		return PTR_ERR(sdma->regs);
 
 	sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
-	if (IS_ERR(sdma->clk_ipg)) {
-		ret = PTR_ERR(sdma->clk_ipg);
-		goto err_clk;
-	}
+	if (IS_ERR(sdma->clk_ipg))
+		return PTR_ERR(sdma->clk_ipg);
 
 	sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
-	if (IS_ERR(sdma->clk_ahb)) {
-		ret = PTR_ERR(sdma->clk_ahb);
-		goto err_clk;
-	}
+	if (IS_ERR(sdma->clk_ahb))
+		return PTR_ERR(sdma->clk_ahb);
 
 	clk_prepare(sdma->clk_ipg);
 	clk_prepare(sdma->clk_ahb);
 
-	sdma->regs = ioremap(iores->start, resource_size(iores));
-	if (!sdma->regs) {
-		ret = -ENOMEM;
-		goto err_ioremap;
-	}
-
-	ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
+	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
+			       sdma);
 	if (ret)
-		goto err_request_irq;
+		return ret;
 
 	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
-	if (!sdma->script_addrs) {
-		ret = -ENOMEM;
-		goto err_alloc;
-	}
+	if (!sdma->script_addrs)
+		return -ENOMEM;
 
 	/* initially no scripts available */
 	saddr_arr = (s32 *)sdma->script_addrs;
@@ -1600,7 +1579,12 @@ static int sdma_probe(struct platform_device *pdev)
 	sdma->dma_device.device_tx_status = sdma_tx_status;
 	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
 	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
-	sdma->dma_device.device_control = sdma_control;
+	sdma->dma_device.device_config = sdma_config;
+	sdma->dma_device.device_terminate_all = sdma_disable_channel;
+	sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	sdma->dma_device.device_issue_pending = sdma_issue_pending;
 	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
1605 sdma->dma_device.dev->dma_parms = &sdma->dma_parms; 1589 sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
1606 dma_set_max_seg_size(sdma->dma_device.dev, 65535); 1590 dma_set_max_seg_size(sdma->dma_device.dev, 65535);
@@ -1629,38 +1613,22 @@ err_register:
1629 dma_async_device_unregister(&sdma->dma_device); 1613 dma_async_device_unregister(&sdma->dma_device);
1630err_init: 1614err_init:
1631 kfree(sdma->script_addrs); 1615 kfree(sdma->script_addrs);
1632err_alloc:
1633 free_irq(irq, sdma);
1634err_request_irq:
1635 iounmap(sdma->regs);
1636err_ioremap:
1637err_clk:
1638 release_mem_region(iores->start, resource_size(iores));
1639err_request_region:
1640err_irq:
1641 kfree(sdma);
1642 return ret; 1616 return ret;
1643} 1617}
1644 1618
1645static int sdma_remove(struct platform_device *pdev) 1619static int sdma_remove(struct platform_device *pdev)
1646{ 1620{
1647 struct sdma_engine *sdma = platform_get_drvdata(pdev); 1621 struct sdma_engine *sdma = platform_get_drvdata(pdev);
1648 struct resource *iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1649 int irq = platform_get_irq(pdev, 0);
1650 int i; 1622 int i;
1651 1623
1652 dma_async_device_unregister(&sdma->dma_device); 1624 dma_async_device_unregister(&sdma->dma_device);
1653 kfree(sdma->script_addrs); 1625 kfree(sdma->script_addrs);
1654 free_irq(irq, sdma);
1655 iounmap(sdma->regs);
1656 release_mem_region(iores->start, resource_size(iores));
1657 /* Kill the tasklet */ 1626 /* Kill the tasklet */
1658 for (i = 0; i < MAX_DMA_CHANNELS; i++) { 1627 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
1659 struct sdma_channel *sdmac = &sdma->channel[i]; 1628 struct sdma_channel *sdmac = &sdma->channel[i];
1660 1629
1661 tasklet_kill(&sdmac->tasklet); 1630 tasklet_kill(&sdmac->tasklet);
1662 } 1631 }
1663 kfree(sdma);
1664 1632
1665 platform_set_drvdata(pdev, NULL); 1633 platform_set_drvdata(pdev, NULL);
1666 dev_info(&pdev->dev, "Removed...\n"); 1634 dev_info(&pdev->dev, "Removed...\n");
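
[Note on the imx-sdma hunks above: the conversion does two independent things. It splits the old device_control(cmd, arg) multiplexer into dedicated device_config/device_terminate_all callbacks, both taking a struct dma_chan *, and it moves probe() onto managed devm_* resources so the goto-based error unwinding and most of remove() disappear. A minimal sketch of the managed-probe pattern follows; the foo_* names are illustrative placeholders, not from the patch.]

/*
 * Sketch of the managed-resource probe pattern the sdma hunks switch to.
 */
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct resource *iores;
	void __iomem *regs;
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;		/* propagate the real errno */

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(regs))		/* checks, requests and maps in one step */
		return PTR_ERR(regs);

	ret = devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
			       dev_name(&pdev->dev), NULL);
	if (ret)
		return ret;		/* no goto-based unwinding required */

	return 0;
}

[Because the device core releases devm resources automatically on probe failure or unbind, the err_irq/err_clk/err_ioremap labels deleted above become unnecessary, and remove() no longer frees them by hand.]
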
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 1aab8130efa1..5aaead9b56f7 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -492,10 +492,10 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
492 return ret; 492 return ret;
493} 493}
494 494
495static int dma_slave_control(struct dma_chan *chan, unsigned long arg) 495static int intel_mid_dma_config(struct dma_chan *chan,
496 struct dma_slave_config *slave)
496{ 497{
497 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 498 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
498 struct dma_slave_config *slave = (struct dma_slave_config *)arg;
499 struct intel_mid_dma_slave *mid_slave; 499 struct intel_mid_dma_slave *mid_slave;
500 500
501 BUG_ON(!midc); 501 BUG_ON(!midc);
@@ -509,28 +509,14 @@ static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
509 midc->mid_slave = mid_slave; 509 midc->mid_slave = mid_slave;
510 return 0; 510 return 0;
511} 511}
512/** 512
513 * intel_mid_dma_device_control - DMA device control 513static int intel_mid_dma_terminate_all(struct dma_chan *chan)
514 * @chan: chan for DMA control
515 * @cmd: control cmd
516 * @arg: cmd arg value
517 *
518 * Perform DMA control command
519 */
520static int intel_mid_dma_device_control(struct dma_chan *chan,
521 enum dma_ctrl_cmd cmd, unsigned long arg)
522{ 514{
523 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 515 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
524 struct middma_device *mid = to_middma_device(chan->device); 516 struct middma_device *mid = to_middma_device(chan->device);
525 struct intel_mid_dma_desc *desc, *_desc; 517 struct intel_mid_dma_desc *desc, *_desc;
526 union intel_mid_dma_cfg_lo cfg_lo; 518 union intel_mid_dma_cfg_lo cfg_lo;
527 519
528 if (cmd == DMA_SLAVE_CONFIG)
529 return dma_slave_control(chan, arg);
530
531 if (cmd != DMA_TERMINATE_ALL)
532 return -ENXIO;
533
534 spin_lock_bh(&midc->lock); 520 spin_lock_bh(&midc->lock);
535 if (midc->busy == false) { 521 if (midc->busy == false) {
536 spin_unlock_bh(&midc->lock); 522 spin_unlock_bh(&midc->lock);
@@ -1148,7 +1134,8 @@ static int mid_setup_dma(struct pci_dev *pdev)
1148 dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy; 1134 dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
1149 dma->common.device_issue_pending = intel_mid_dma_issue_pending; 1135 dma->common.device_issue_pending = intel_mid_dma_issue_pending;
1150 dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg; 1136 dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
1151 dma->common.device_control = intel_mid_dma_device_control; 1137 dma->common.device_config = intel_mid_dma_config;
1138 dma->common.device_terminate_all = intel_mid_dma_terminate_all;
1152 1139
1153 /*enable dma cntrl*/ 1140 /*enable dma cntrl*/
1154 iowrite32(REG_BIT0, dma->dma_base + DMA_CFG); 1141 iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);
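
[For orientation across the whole series: the per-operation callbacks replacing device_control are dispatched by small inline wrappers in dmaengine.h. Their shape is roughly the following; this is a sketch, not the verbatim header.]

/* Approximate shape of the dmaengine.h wrappers after the conversion. */
static inline int dmaengine_slave_config(struct dma_chan *chan,
					 struct dma_slave_config *config)
{
	if (chan->device->device_config)
		return chan->device->device_config(chan, config);

	return -ENOSYS;
}

static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -ENOSYS;
}

[Clients keep calling dmaengine_slave_config() and dmaengine_terminate_all() unchanged; only the provider side is touched by this diff.]
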
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 32eae38291e5..77a6dcf25b98 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -214,6 +214,11 @@ static bool is_bwd_ioat(struct pci_dev *pdev)
214 case PCI_DEVICE_ID_INTEL_IOAT_BWD1: 214 case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
215 case PCI_DEVICE_ID_INTEL_IOAT_BWD2: 215 case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
216 case PCI_DEVICE_ID_INTEL_IOAT_BWD3: 216 case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
217 /* even though not Atom, BDX-DE has same DMA silicon */
218 case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
219 case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
220 case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
221 case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
217 return true; 222 return true;
218 default: 223 default:
219 return false; 224 return false;
@@ -489,6 +494,7 @@ static void ioat3_eh(struct ioat2_dma_chan *ioat)
489 struct ioat_chan_common *chan = &ioat->base; 494 struct ioat_chan_common *chan = &ioat->base;
490 struct pci_dev *pdev = to_pdev(chan); 495 struct pci_dev *pdev = to_pdev(chan);
491 struct ioat_dma_descriptor *hw; 496 struct ioat_dma_descriptor *hw;
497 struct dma_async_tx_descriptor *tx;
492 u64 phys_complete; 498 u64 phys_complete;
493 struct ioat_ring_ent *desc; 499 struct ioat_ring_ent *desc;
494 u32 err_handled = 0; 500 u32 err_handled = 0;
@@ -534,6 +540,16 @@ static void ioat3_eh(struct ioat2_dma_chan *ioat)
534 dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n", 540 dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
535 __func__, chanerr, err_handled); 541 __func__, chanerr, err_handled);
536 BUG(); 542 BUG();
543 } else { /* cleanup the faulty descriptor */
544 tx = &desc->txd;
545 if (tx->cookie) {
546 dma_cookie_complete(tx);
547 dma_descriptor_unmap(tx);
548 if (tx->callback) {
549 tx->callback(tx->callback_param);
550 tx->callback = NULL;
551 }
552 }
537 } 553 }
538 554
539 writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); 555 writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -1300,7 +1316,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
1300 1316
1301 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 1317 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1302 1318
1303 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { 1319 if (tmo == 0 ||
1320 dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1304 dev_err(dev, "Self-test xor timed out\n"); 1321 dev_err(dev, "Self-test xor timed out\n");
1305 err = -ENODEV; 1322 err = -ENODEV;
1306 goto dma_unmap; 1323 goto dma_unmap;
@@ -1366,7 +1383,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
1366 1383
1367 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 1384 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1368 1385
1369 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { 1386 if (tmo == 0 ||
1387 dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1370 dev_err(dev, "Self-test validate timed out\n"); 1388 dev_err(dev, "Self-test validate timed out\n");
1371 err = -ENODEV; 1389 err = -ENODEV;
1372 goto dma_unmap; 1390 goto dma_unmap;
@@ -1418,7 +1436,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
1418 1436
1419 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 1437 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1420 1438
1421 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { 1439 if (tmo == 0 ||
1440 dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1422 dev_err(dev, "Self-test 2nd validate timed out\n"); 1441 dev_err(dev, "Self-test 2nd validate timed out\n");
1423 err = -ENODEV; 1442 err = -ENODEV;
1424 goto dma_unmap; 1443 goto dma_unmap;
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 62f83e983d8d..02177ecf09f8 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -57,6 +57,11 @@
57#define PCI_DEVICE_ID_INTEL_IOAT_BWD2 0x0C52 57#define PCI_DEVICE_ID_INTEL_IOAT_BWD2 0x0C52
58#define PCI_DEVICE_ID_INTEL_IOAT_BWD3 0x0C53 58#define PCI_DEVICE_ID_INTEL_IOAT_BWD3 0x0C53
59 59
60#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE0 0x6f50
61#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE1 0x6f51
62#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE2 0x6f52
63#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE3 0x6f53
64
60#define IOAT_VER_1_2 0x12 /* Version 1.2 */ 65#define IOAT_VER_1_2 0x12 /* Version 1.2 */
61#define IOAT_VER_2_0 0x20 /* Version 2.0 */ 66#define IOAT_VER_2_0 0x20 /* Version 2.0 */
62#define IOAT_VER_3_0 0x30 /* Version 3.0 */ 67#define IOAT_VER_3_0 0x30 /* Version 3.0 */
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 1d051cd045db..5501eb072d69 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -111,6 +111,11 @@ static struct pci_device_id ioat_pci_tbl[] = {
111 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) }, 111 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
112 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) }, 112 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },
113 113
114 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
115 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
116 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
117 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },
118
114 { 0, } 119 { 0, }
115}; 120};
116MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); 121MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
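
[Two separate fixes ride along in the ioat hunks: new BDX-DE PCI device IDs, and a hardening of the self-tests. wait_for_completion_timeout() returns 0 on timeout and the remaining jiffies otherwise, so the test must fail on a zero return even if the status register happens to read complete afterwards.]

/* Pattern applied three times in ioat_xor_val_self_test() above. */
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

if (tmo == 0 ||
    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
	dev_err(dev, "Self-test xor timed out\n");
	err = -ENODEV;
	goto dma_unmap;
}
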
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index c2b017ad139d..b54f62de9232 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1398,76 +1398,81 @@ static void idmac_issue_pending(struct dma_chan *chan)
1398 */ 1398 */
1399} 1399}
1400 1400
1401static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1401static int idmac_pause(struct dma_chan *chan)
1402 unsigned long arg)
1403{ 1402{
1404 struct idmac_channel *ichan = to_idmac_chan(chan); 1403 struct idmac_channel *ichan = to_idmac_chan(chan);
1405 struct idmac *idmac = to_idmac(chan->device); 1404 struct idmac *idmac = to_idmac(chan->device);
1406 struct ipu *ipu = to_ipu(idmac); 1405 struct ipu *ipu = to_ipu(idmac);
1407 struct list_head *list, *tmp; 1406 struct list_head *list, *tmp;
1408 unsigned long flags; 1407 unsigned long flags;
1409 int i;
1410 1408
1411 switch (cmd) { 1409 mutex_lock(&ichan->chan_mutex);
1412 case DMA_PAUSE:
1413 spin_lock_irqsave(&ipu->lock, flags);
1414 ipu_ic_disable_task(ipu, chan->chan_id);
1415 1410
1416 /* Return all descriptors into "prepared" state */ 1411 spin_lock_irqsave(&ipu->lock, flags);
1417 list_for_each_safe(list, tmp, &ichan->queue) 1412 ipu_ic_disable_task(ipu, chan->chan_id);
1418 list_del_init(list);
1419 1413
1420 ichan->sg[0] = NULL; 1414 /* Return all descriptors into "prepared" state */
1421 ichan->sg[1] = NULL; 1415 list_for_each_safe(list, tmp, &ichan->queue)
1416 list_del_init(list);
1422 1417
1423 spin_unlock_irqrestore(&ipu->lock, flags); 1418 ichan->sg[0] = NULL;
1419 ichan->sg[1] = NULL;
1424 1420
1425 ichan->status = IPU_CHANNEL_INITIALIZED; 1421 spin_unlock_irqrestore(&ipu->lock, flags);
1426 break;
1427 case DMA_TERMINATE_ALL:
1428 ipu_disable_channel(idmac, ichan,
1429 ichan->status >= IPU_CHANNEL_ENABLED);
1430 1422
1431 tasklet_disable(&ipu->tasklet); 1423 ichan->status = IPU_CHANNEL_INITIALIZED;
1432 1424
1433 /* ichan->queue is modified in ISR, have to spinlock */ 1425 mutex_unlock(&ichan->chan_mutex);
1434 spin_lock_irqsave(&ichan->lock, flags);
1435 list_splice_init(&ichan->queue, &ichan->free_list);
1436 1426
1437 if (ichan->desc) 1427 return 0;
1438 for (i = 0; i < ichan->n_tx_desc; i++) { 1428}
1439 struct idmac_tx_desc *desc = ichan->desc + i;
1440 if (list_empty(&desc->list))
1441 /* Descriptor was prepared, but not submitted */
1442 list_add(&desc->list, &ichan->free_list);
1443 1429
1444 async_tx_clear_ack(&desc->txd); 1430static int __idmac_terminate_all(struct dma_chan *chan)
1445 } 1431{
1432 struct idmac_channel *ichan = to_idmac_chan(chan);
1433 struct idmac *idmac = to_idmac(chan->device);
1434 struct ipu *ipu = to_ipu(idmac);
1435 unsigned long flags;
1436 int i;
1446 1437
1447 ichan->sg[0] = NULL; 1438 ipu_disable_channel(idmac, ichan,
1448 ichan->sg[1] = NULL; 1439 ichan->status >= IPU_CHANNEL_ENABLED);
1449 spin_unlock_irqrestore(&ichan->lock, flags);
1450 1440
1451 tasklet_enable(&ipu->tasklet); 1441 tasklet_disable(&ipu->tasklet);
1452 1442
1453 ichan->status = IPU_CHANNEL_INITIALIZED; 1443 /* ichan->queue is modified in ISR, have to spinlock */
1454 break; 1444 spin_lock_irqsave(&ichan->lock, flags);
1455 default: 1445 list_splice_init(&ichan->queue, &ichan->free_list);
1456 return -ENOSYS; 1446
1457 } 1447 if (ichan->desc)
1448 for (i = 0; i < ichan->n_tx_desc; i++) {
1449 struct idmac_tx_desc *desc = ichan->desc + i;
1450 if (list_empty(&desc->list))
1451 /* Descriptor was prepared, but not submitted */
1452 list_add(&desc->list, &ichan->free_list);
1453
1454 async_tx_clear_ack(&desc->txd);
1455 }
1456
1457 ichan->sg[0] = NULL;
1458 ichan->sg[1] = NULL;
1459 spin_unlock_irqrestore(&ichan->lock, flags);
1460
1461 tasklet_enable(&ipu->tasklet);
1462
1463 ichan->status = IPU_CHANNEL_INITIALIZED;
1458 1464
1459 return 0; 1465 return 0;
1460} 1466}
1461 1467
1462static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1468static int idmac_terminate_all(struct dma_chan *chan)
1463 unsigned long arg)
1464{ 1469{
1465 struct idmac_channel *ichan = to_idmac_chan(chan); 1470 struct idmac_channel *ichan = to_idmac_chan(chan);
1466 int ret; 1471 int ret;
1467 1472
1468 mutex_lock(&ichan->chan_mutex); 1473 mutex_lock(&ichan->chan_mutex);
1469 1474
1470 ret = __idmac_control(chan, cmd, arg); 1475 ret = __idmac_terminate_all(chan);
1471 1476
1472 mutex_unlock(&ichan->chan_mutex); 1477 mutex_unlock(&ichan->chan_mutex);
1473 1478
@@ -1568,7 +1573,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
1568 1573
1569 mutex_lock(&ichan->chan_mutex); 1574 mutex_lock(&ichan->chan_mutex);
1570 1575
1571 __idmac_control(chan, DMA_TERMINATE_ALL, 0); 1576 __idmac_terminate_all(chan);
1572 1577
1573 if (ichan->status > IPU_CHANNEL_FREE) { 1578 if (ichan->status > IPU_CHANNEL_FREE) {
1574#ifdef DEBUG 1579#ifdef DEBUG
@@ -1622,7 +1627,8 @@ static int __init ipu_idmac_init(struct ipu *ipu)
1622 1627
1623 /* Compulsory for DMA_SLAVE fields */ 1628 /* Compulsory for DMA_SLAVE fields */
1624 dma->device_prep_slave_sg = idmac_prep_slave_sg; 1629 dma->device_prep_slave_sg = idmac_prep_slave_sg;
1625 dma->device_control = idmac_control; 1630 dma->device_pause = idmac_pause;
1631 dma->device_terminate_all = idmac_terminate_all;
1626 1632
1627 INIT_LIST_HEAD(&dma->channels); 1633 INIT_LIST_HEAD(&dma->channels);
1628 for (i = 0; i < IPU_CHANNELS_NUM; i++) { 1634 for (i = 0; i < IPU_CHANNELS_NUM; i++) {
@@ -1655,7 +1661,7 @@ static void ipu_idmac_exit(struct ipu *ipu)
1655 for (i = 0; i < IPU_CHANNELS_NUM; i++) { 1661 for (i = 0; i < IPU_CHANNELS_NUM; i++) {
1656 struct idmac_channel *ichan = ipu->channel + i; 1662 struct idmac_channel *ichan = ipu->channel + i;
1657 1663
1658 idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0); 1664 idmac_terminate_all(&ichan->dma_chan);
1659 } 1665 }
1660 1666
1661 dma_async_device_unregister(&idmac->dma); 1667 dma_async_device_unregister(&idmac->dma);
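
[The idmac split preserves the old locked/unlocked pairing: __idmac_terminate_all() assumes chan_mutex is already held, which is why idmac_free_chan_resources() can call it directly, while the dmaengine-facing callback takes the mutex itself, as in the hunk above.]

/* Locked wrapper installed as the .device_terminate_all callback. */
static int idmac_terminate_all(struct dma_chan *chan)
{
	struct idmac_channel *ichan = to_idmac_chan(chan);
	int ret;

	mutex_lock(&ichan->chan_mutex);
	ret = __idmac_terminate_all(chan);	/* caller-locked helper */
	mutex_unlock(&ichan->chan_mutex);

	return ret;
}
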
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index a1de14ab2c51..6f7f43529ccb 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -441,7 +441,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
441 num = 0; 441 num = 0;
442 442
443 if (!c->ccfg) { 443 if (!c->ccfg) {
444 /* default is memtomem, without calling device_control */ 444 /* default is memtomem, without calling device_config */
445 c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN; 445 c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
446 c->ccfg |= (0xf << 20) | (0xf << 24); /* burst = 16 */ 446 c->ccfg |= (0xf << 20) | (0xf << 24); /* burst = 16 */
447 c->ccfg |= (0x3 << 12) | (0x3 << 16); /* width = 64 bit */ 447 c->ccfg |= (0x3 << 12) | (0x3 << 16); /* width = 64 bit */
@@ -523,112 +523,126 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
523 return vchan_tx_prep(&c->vc, &ds->vd, flags); 523 return vchan_tx_prep(&c->vc, &ds->vd, flags);
524} 524}
525 525
526static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 526static int k3_dma_config(struct dma_chan *chan,
527 unsigned long arg) 527 struct dma_slave_config *cfg)
528{
529 struct k3_dma_chan *c = to_k3_chan(chan);
530 u32 maxburst = 0, val = 0;
531 enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
532
533 if (cfg == NULL)
534 return -EINVAL;
535 c->dir = cfg->direction;
536 if (c->dir == DMA_DEV_TO_MEM) {
537 c->ccfg = CX_CFG_DSTINCR;
538 c->dev_addr = cfg->src_addr;
539 maxburst = cfg->src_maxburst;
540 width = cfg->src_addr_width;
541 } else if (c->dir == DMA_MEM_TO_DEV) {
542 c->ccfg = CX_CFG_SRCINCR;
543 c->dev_addr = cfg->dst_addr;
544 maxburst = cfg->dst_maxburst;
545 width = cfg->dst_addr_width;
546 }
547 switch (width) {
548 case DMA_SLAVE_BUSWIDTH_1_BYTE:
549 case DMA_SLAVE_BUSWIDTH_2_BYTES:
550 case DMA_SLAVE_BUSWIDTH_4_BYTES:
551 case DMA_SLAVE_BUSWIDTH_8_BYTES:
552 val = __ffs(width);
553 break;
554 default:
555 val = 3;
556 break;
557 }
558 c->ccfg |= (val << 12) | (val << 16);
559
560 if ((maxburst == 0) || (maxburst > 16))
561 val = 16;
562 else
563 val = maxburst - 1;
564 c->ccfg |= (val << 20) | (val << 24);
565 c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
566
567 /* specific request line */
568 c->ccfg |= c->vc.chan.chan_id << 4;
569
570 return 0;
571}
572
573static int k3_dma_terminate_all(struct dma_chan *chan)
528{ 574{
529 struct k3_dma_chan *c = to_k3_chan(chan); 575 struct k3_dma_chan *c = to_k3_chan(chan);
530 struct k3_dma_dev *d = to_k3_dma(chan->device); 576 struct k3_dma_dev *d = to_k3_dma(chan->device);
531 struct dma_slave_config *cfg = (void *)arg;
532 struct k3_dma_phy *p = c->phy; 577 struct k3_dma_phy *p = c->phy;
533 unsigned long flags; 578 unsigned long flags;
534 u32 maxburst = 0, val = 0;
535 enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
536 LIST_HEAD(head); 579 LIST_HEAD(head);
537 580
538 switch (cmd) { 581 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
539 case DMA_SLAVE_CONFIG:
540 if (cfg == NULL)
541 return -EINVAL;
542 c->dir = cfg->direction;
543 if (c->dir == DMA_DEV_TO_MEM) {
544 c->ccfg = CX_CFG_DSTINCR;
545 c->dev_addr = cfg->src_addr;
546 maxburst = cfg->src_maxburst;
547 width = cfg->src_addr_width;
548 } else if (c->dir == DMA_MEM_TO_DEV) {
549 c->ccfg = CX_CFG_SRCINCR;
550 c->dev_addr = cfg->dst_addr;
551 maxburst = cfg->dst_maxburst;
552 width = cfg->dst_addr_width;
553 }
554 switch (width) {
555 case DMA_SLAVE_BUSWIDTH_1_BYTE:
556 case DMA_SLAVE_BUSWIDTH_2_BYTES:
557 case DMA_SLAVE_BUSWIDTH_4_BYTES:
558 case DMA_SLAVE_BUSWIDTH_8_BYTES:
559 val = __ffs(width);
560 break;
561 default:
562 val = 3;
563 break;
564 }
565 c->ccfg |= (val << 12) | (val << 16);
566 582
567 if ((maxburst == 0) || (maxburst > 16)) 583 /* Prevent this channel being scheduled */
568 val = 16; 584 spin_lock(&d->lock);
569 else 585 list_del_init(&c->node);
570 val = maxburst - 1; 586 spin_unlock(&d->lock);
571 c->ccfg |= (val << 20) | (val << 24);
572 c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
573 587
574 /* specific request line */ 588 /* Clear the tx descriptor lists */
575 c->ccfg |= c->vc.chan.chan_id << 4; 589 spin_lock_irqsave(&c->vc.lock, flags);
576 break; 590 vchan_get_all_descriptors(&c->vc, &head);
591 if (p) {
592 /* vchan is assigned to a pchan - stop the channel */
593 k3_dma_terminate_chan(p, d);
594 c->phy = NULL;
595 p->vchan = NULL;
596 p->ds_run = p->ds_done = NULL;
597 }
598 spin_unlock_irqrestore(&c->vc.lock, flags);
599 vchan_dma_desc_free_list(&c->vc, &head);
577 600
578 case DMA_TERMINATE_ALL: 601 return 0;
579 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); 602}
580 603
581 /* Prevent this channel being scheduled */ 604static int k3_dma_transfer_pause(struct dma_chan *chan)
582 spin_lock(&d->lock); 605{
583 list_del_init(&c->node); 606 struct k3_dma_chan *c = to_k3_chan(chan);
584 spin_unlock(&d->lock); 607 struct k3_dma_dev *d = to_k3_dma(chan->device);
608 struct k3_dma_phy *p = c->phy;
585 609
586 /* Clear the tx descriptor lists */ 610 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
587 spin_lock_irqsave(&c->vc.lock, flags); 611 if (c->status == DMA_IN_PROGRESS) {
588 vchan_get_all_descriptors(&c->vc, &head); 612 c->status = DMA_PAUSED;
589 if (p) { 613 if (p) {
590 /* vchan is assigned to a pchan - stop the channel */ 614 k3_dma_pause_dma(p, false);
591 k3_dma_terminate_chan(p, d); 615 } else {
592 c->phy = NULL; 616 spin_lock(&d->lock);
593 p->vchan = NULL; 617 list_del_init(&c->node);
594 p->ds_run = p->ds_done = NULL; 618 spin_unlock(&d->lock);
595 } 619 }
596 spin_unlock_irqrestore(&c->vc.lock, flags); 620 }
597 vchan_dma_desc_free_list(&c->vc, &head);
598 break;
599 621
600 case DMA_PAUSE: 622 return 0;
601 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); 623}
602 if (c->status == DMA_IN_PROGRESS) {
603 c->status = DMA_PAUSED;
604 if (p) {
605 k3_dma_pause_dma(p, false);
606 } else {
607 spin_lock(&d->lock);
608 list_del_init(&c->node);
609 spin_unlock(&d->lock);
610 }
611 }
612 break;
613 624
614 case DMA_RESUME: 625static int k3_dma_transfer_resume(struct dma_chan *chan)
615 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); 626{
616 spin_lock_irqsave(&c->vc.lock, flags); 627 struct k3_dma_chan *c = to_k3_chan(chan);
617 if (c->status == DMA_PAUSED) { 628 struct k3_dma_dev *d = to_k3_dma(chan->device);
618 c->status = DMA_IN_PROGRESS; 629 struct k3_dma_phy *p = c->phy;
619 if (p) { 630 unsigned long flags;
620 k3_dma_pause_dma(p, true); 631
621 } else if (!list_empty(&c->vc.desc_issued)) { 632 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
622 spin_lock(&d->lock); 633 spin_lock_irqsave(&c->vc.lock, flags);
623 list_add_tail(&c->node, &d->chan_pending); 634 if (c->status == DMA_PAUSED) {
624 spin_unlock(&d->lock); 635 c->status = DMA_IN_PROGRESS;
625 } 636 if (p) {
637 k3_dma_pause_dma(p, true);
638 } else if (!list_empty(&c->vc.desc_issued)) {
639 spin_lock(&d->lock);
640 list_add_tail(&c->node, &d->chan_pending);
641 spin_unlock(&d->lock);
626 } 642 }
627 spin_unlock_irqrestore(&c->vc.lock, flags);
628 break;
629 default:
630 return -ENXIO;
631 } 643 }
644 spin_unlock_irqrestore(&c->vc.lock, flags);
645
632 return 0; 646 return 0;
633} 647}
634 648
@@ -720,7 +734,10 @@ static int k3_dma_probe(struct platform_device *op)
720 d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy; 734 d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
721 d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg; 735 d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
722 d->slave.device_issue_pending = k3_dma_issue_pending; 736 d->slave.device_issue_pending = k3_dma_issue_pending;
723 d->slave.device_control = k3_dma_control; 737 d->slave.device_config = k3_dma_config;
738 d->slave.device_pause = k3_dma_transfer_pause;
739 d->slave.device_resume = k3_dma_transfer_resume;
740 d->slave.device_terminate_all = k3_dma_terminate_all;
724 d->slave.copy_align = DMA_ALIGN; 741 d->slave.copy_align = DMA_ALIGN;
725 742
726 /* init virtual channel */ 743 /* init virtual channel */
@@ -787,7 +804,7 @@ static int k3_dma_remove(struct platform_device *op)
787} 804}
788 805
789#ifdef CONFIG_PM_SLEEP 806#ifdef CONFIG_PM_SLEEP
790static int k3_dma_suspend(struct device *dev) 807static int k3_dma_suspend_dev(struct device *dev)
791{ 808{
792 struct k3_dma_dev *d = dev_get_drvdata(dev); 809 struct k3_dma_dev *d = dev_get_drvdata(dev);
793 u32 stat = 0; 810 u32 stat = 0;
@@ -803,7 +820,7 @@ static int k3_dma_suspend(struct device *dev)
803 return 0; 820 return 0;
804} 821}
805 822
806static int k3_dma_resume(struct device *dev) 823static int k3_dma_resume_dev(struct device *dev)
807{ 824{
808 struct k3_dma_dev *d = dev_get_drvdata(dev); 825 struct k3_dma_dev *d = dev_get_drvdata(dev);
809 int ret = 0; 826 int ret = 0;
@@ -818,7 +835,7 @@ static int k3_dma_resume(struct device *dev)
818} 835}
819#endif 836#endif
820 837
821static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume); 838static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);
822 839
823static struct platform_driver k3_pdma_driver = { 840static struct platform_driver k3_pdma_driver = {
824 .driver = { 841 .driver = {
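
[Besides the callback split, k3dma renames its system PM hooks (k3_dma_suspend to k3_dma_suspend_dev, likewise resume) so they can no longer be confused with the new per-channel device_pause/device_resume dmaengine callbacks; the PM wiring itself is unchanged.]

/* Same pm_ops, new unambiguous names (as in the hunk above). */
static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);
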
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 8b8952f35e6c..8926f271904e 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -683,68 +683,70 @@ fail:
683 return NULL; 683 return NULL;
684} 684}
685 685
686static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, 686static int mmp_pdma_config(struct dma_chan *dchan,
687 unsigned long arg) 687 struct dma_slave_config *cfg)
688{ 688{
689 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); 689 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
690 struct dma_slave_config *cfg = (void *)arg;
691 unsigned long flags;
692 u32 maxburst = 0, addr = 0; 690 u32 maxburst = 0, addr = 0;
693 enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; 691 enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
694 692
695 if (!dchan) 693 if (!dchan)
696 return -EINVAL; 694 return -EINVAL;
697 695
698 switch (cmd) { 696 if (cfg->direction == DMA_DEV_TO_MEM) {
699 case DMA_TERMINATE_ALL: 697 chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
700 disable_chan(chan->phy); 698 maxburst = cfg->src_maxburst;
701 mmp_pdma_free_phy(chan); 699 width = cfg->src_addr_width;
702 spin_lock_irqsave(&chan->desc_lock, flags); 700 addr = cfg->src_addr;
703 mmp_pdma_free_desc_list(chan, &chan->chain_pending); 701 } else if (cfg->direction == DMA_MEM_TO_DEV) {
704 mmp_pdma_free_desc_list(chan, &chan->chain_running); 702 chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
705 spin_unlock_irqrestore(&chan->desc_lock, flags); 703 maxburst = cfg->dst_maxburst;
706 chan->idle = true; 704 width = cfg->dst_addr_width;
707 break; 705 addr = cfg->dst_addr;
708 case DMA_SLAVE_CONFIG:
709 if (cfg->direction == DMA_DEV_TO_MEM) {
710 chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
711 maxburst = cfg->src_maxburst;
712 width = cfg->src_addr_width;
713 addr = cfg->src_addr;
714 } else if (cfg->direction == DMA_MEM_TO_DEV) {
715 chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
716 maxburst = cfg->dst_maxburst;
717 width = cfg->dst_addr_width;
718 addr = cfg->dst_addr;
719 }
720
721 if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
722 chan->dcmd |= DCMD_WIDTH1;
723 else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
724 chan->dcmd |= DCMD_WIDTH2;
725 else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
726 chan->dcmd |= DCMD_WIDTH4;
727
728 if (maxburst == 8)
729 chan->dcmd |= DCMD_BURST8;
730 else if (maxburst == 16)
731 chan->dcmd |= DCMD_BURST16;
732 else if (maxburst == 32)
733 chan->dcmd |= DCMD_BURST32;
734
735 chan->dir = cfg->direction;
736 chan->dev_addr = addr;
737 /* FIXME: drivers should be ported over to use the filter
738 * function. Once that's done, the following two lines can
739 * be removed.
740 */
741 if (cfg->slave_id)
742 chan->drcmr = cfg->slave_id;
743 break;
744 default:
745 return -ENOSYS;
746 } 706 }
747 707
708 if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
709 chan->dcmd |= DCMD_WIDTH1;
710 else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
711 chan->dcmd |= DCMD_WIDTH2;
712 else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
713 chan->dcmd |= DCMD_WIDTH4;
714
715 if (maxburst == 8)
716 chan->dcmd |= DCMD_BURST8;
717 else if (maxburst == 16)
718 chan->dcmd |= DCMD_BURST16;
719 else if (maxburst == 32)
720 chan->dcmd |= DCMD_BURST32;
721
722 chan->dir = cfg->direction;
723 chan->dev_addr = addr;
724 /* FIXME: drivers should be ported over to use the filter
725 * function. Once that's done, the following two lines can
726 * be removed.
727 */
728 if (cfg->slave_id)
729 chan->drcmr = cfg->slave_id;
730
731 return 0;
732}
733
734static int mmp_pdma_terminate_all(struct dma_chan *dchan)
735{
736 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
737 unsigned long flags;
738
739 if (!dchan)
740 return -EINVAL;
741
742 disable_chan(chan->phy);
743 mmp_pdma_free_phy(chan);
744 spin_lock_irqsave(&chan->desc_lock, flags);
745 mmp_pdma_free_desc_list(chan, &chan->chain_pending);
746 mmp_pdma_free_desc_list(chan, &chan->chain_running);
747 spin_unlock_irqrestore(&chan->desc_lock, flags);
748 chan->idle = true;
749
748 return 0; 750 return 0;
749} 751}
750 752
@@ -1061,7 +1063,8 @@ static int mmp_pdma_probe(struct platform_device *op)
1061 pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg; 1063 pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
1062 pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic; 1064 pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
1063 pdev->device.device_issue_pending = mmp_pdma_issue_pending; 1065 pdev->device.device_issue_pending = mmp_pdma_issue_pending;
1064 pdev->device.device_control = mmp_pdma_control; 1066 pdev->device.device_config = mmp_pdma_config;
1067 pdev->device.device_terminate_all = mmp_pdma_terminate_all;
1065 pdev->device.copy_align = PDMA_ALIGNMENT; 1068 pdev->device.copy_align = PDMA_ALIGNMENT;
1066 1069
1067 if (pdev->dev->coherent_dma_mask) 1070 if (pdev->dev->coherent_dma_mask)
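
[Seen from a client driver, nothing changes with the mmp_pdma split: a peripheral driver still fills in a dma_slave_config and the engine's device_config callback translates it into DCMD width/burst bits. A hedged client-side sketch, where the channel and FIFO address are assumed to come from the caller:]

#include <linux/dmaengine.h>

/* Hypothetical client: configure a mem-to-device channel. */
static int setup_tx_channel(struct dma_chan *chan, dma_addr_t fifo_phys)
{
	struct dma_slave_config cfg = {
		.direction      = DMA_MEM_TO_DEV,
		.dst_addr       = fifo_phys,		/* peripheral FIFO */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst   = 16,			/* maps to DCMD_BURST16 */
	};

	return dmaengine_slave_config(chan, &cfg);
}
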
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index bfb46957c3dc..70c2fa9963cd 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -19,7 +19,6 @@
19#include <linux/dmaengine.h> 19#include <linux/dmaengine.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/device.h> 21#include <linux/device.h>
22#include <mach/regs-icu.h>
23#include <linux/platform_data/dma-mmp_tdma.h> 22#include <linux/platform_data/dma-mmp_tdma.h>
24#include <linux/of_device.h> 23#include <linux/of_device.h>
25#include <linux/of_dma.h> 24#include <linux/of_dma.h>
@@ -164,33 +163,46 @@ static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
164 tdmac->status = DMA_IN_PROGRESS; 163 tdmac->status = DMA_IN_PROGRESS;
165} 164}
166 165
167static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac) 166static int mmp_tdma_disable_chan(struct dma_chan *chan)
168{ 167{
168 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
169
169 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, 170 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
170 tdmac->reg_base + TDCR); 171 tdmac->reg_base + TDCR);
171 172
172 tdmac->status = DMA_COMPLETE; 173 tdmac->status = DMA_COMPLETE;
174
175 return 0;
173} 176}
174 177
175static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac) 178static int mmp_tdma_resume_chan(struct dma_chan *chan)
176{ 179{
180 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
181
177 writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN, 182 writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
178 tdmac->reg_base + TDCR); 183 tdmac->reg_base + TDCR);
179 tdmac->status = DMA_IN_PROGRESS; 184 tdmac->status = DMA_IN_PROGRESS;
185
186 return 0;
180} 187}
181 188
182static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac) 189static int mmp_tdma_pause_chan(struct dma_chan *chan)
183{ 190{
191 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
192
184 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, 193 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
185 tdmac->reg_base + TDCR); 194 tdmac->reg_base + TDCR);
186 tdmac->status = DMA_PAUSED; 195 tdmac->status = DMA_PAUSED;
196
197 return 0;
187} 198}
188 199
189static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac) 200static int mmp_tdma_config_chan(struct dma_chan *chan)
190{ 201{
202 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
191 unsigned int tdcr = 0; 203 unsigned int tdcr = 0;
192 204
193 mmp_tdma_disable_chan(tdmac); 205 mmp_tdma_disable_chan(chan);
194 206
195 if (tdmac->dir == DMA_MEM_TO_DEV) 207 if (tdmac->dir == DMA_MEM_TO_DEV)
196 tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC; 208 tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC;
@@ -452,42 +464,34 @@ err_out:
452 return NULL; 464 return NULL;
453} 465}
454 466
455static int mmp_tdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 467static int mmp_tdma_terminate_all(struct dma_chan *chan)
456 unsigned long arg)
457{ 468{
458 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 469 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
459 struct dma_slave_config *dmaengine_cfg = (void *)arg; 470
460 int ret = 0; 471 mmp_tdma_disable_chan(chan);
461 472 /* disable interrupt */
462 switch (cmd) { 473 mmp_tdma_enable_irq(tdmac, false);
463 case DMA_TERMINATE_ALL: 474
464 mmp_tdma_disable_chan(tdmac); 475 return 0;
465 /* disable interrupt */ 476}
466 mmp_tdma_enable_irq(tdmac, false); 477
467 break; 478static int mmp_tdma_config(struct dma_chan *chan,
468 case DMA_PAUSE: 479 struct dma_slave_config *dmaengine_cfg)
469 mmp_tdma_pause_chan(tdmac); 480{
470 break; 481 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
471 case DMA_RESUME: 482
472 mmp_tdma_resume_chan(tdmac); 483 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
473 break; 484 tdmac->dev_addr = dmaengine_cfg->src_addr;
474 case DMA_SLAVE_CONFIG: 485 tdmac->burst_sz = dmaengine_cfg->src_maxburst;
475 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { 486 tdmac->buswidth = dmaengine_cfg->src_addr_width;
476 tdmac->dev_addr = dmaengine_cfg->src_addr; 487 } else {
477 tdmac->burst_sz = dmaengine_cfg->src_maxburst; 488 tdmac->dev_addr = dmaengine_cfg->dst_addr;
478 tdmac->buswidth = dmaengine_cfg->src_addr_width; 489 tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
479 } else { 490 tdmac->buswidth = dmaengine_cfg->dst_addr_width;
480 tdmac->dev_addr = dmaengine_cfg->dst_addr;
481 tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
482 tdmac->buswidth = dmaengine_cfg->dst_addr_width;
483 }
484 tdmac->dir = dmaengine_cfg->direction;
485 return mmp_tdma_config_chan(tdmac);
486 default:
487 ret = -ENOSYS;
488 } 491 }
492 tdmac->dir = dmaengine_cfg->direction;
489 493
490 return ret; 494 return mmp_tdma_config_chan(chan);
491} 495}
492 496
493static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan, 497static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
@@ -668,7 +672,10 @@ static int mmp_tdma_probe(struct platform_device *pdev)
668 tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic; 672 tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic;
669 tdev->device.device_tx_status = mmp_tdma_tx_status; 673 tdev->device.device_tx_status = mmp_tdma_tx_status;
670 tdev->device.device_issue_pending = mmp_tdma_issue_pending; 674 tdev->device.device_issue_pending = mmp_tdma_issue_pending;
671 tdev->device.device_control = mmp_tdma_control; 675 tdev->device.device_config = mmp_tdma_config;
676 tdev->device.device_pause = mmp_tdma_pause_chan;
677 tdev->device.device_resume = mmp_tdma_resume_chan;
678 tdev->device.device_terminate_all = mmp_tdma_terminate_all;
672 tdev->device.copy_align = TDMA_ALIGNMENT; 679 tdev->device.copy_align = TDMA_ALIGNMENT;
673 680
674 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 681 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
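
[mmp_tdma's internal helpers change signature from struct mmp_tdma_chan * to the generic struct dma_chan * precisely so they can be installed as callbacks without wrappers; each one recovers the private channel first, as in this pause helper from the hunks above.]

/* Conversion pattern used for the pause/resume/disable helpers. */
static int mmp_tdma_pause_chan(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
	       tdmac->reg_base + TDCR);
	tdmac->status = DMA_PAUSED;

	return 0;
}
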
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index 53032bac06e0..15cab7d79525 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -263,28 +263,6 @@ static int moxart_slave_config(struct dma_chan *chan,
263 return 0; 263 return 0;
264} 264}
265 265
266static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
267 unsigned long arg)
268{
269 int ret = 0;
270
271 switch (cmd) {
272 case DMA_PAUSE:
273 case DMA_RESUME:
274 return -EINVAL;
275 case DMA_TERMINATE_ALL:
276 moxart_terminate_all(chan);
277 break;
278 case DMA_SLAVE_CONFIG:
279 ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
280 break;
281 default:
282 ret = -ENOSYS;
283 }
284
285 return ret;
286}
287
288static struct dma_async_tx_descriptor *moxart_prep_slave_sg( 266static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
289 struct dma_chan *chan, struct scatterlist *sgl, 267 struct dma_chan *chan, struct scatterlist *sgl,
290 unsigned int sg_len, enum dma_transfer_direction dir, 268 unsigned int sg_len, enum dma_transfer_direction dir,
@@ -531,7 +509,8 @@ static void moxart_dma_init(struct dma_device *dma, struct device *dev)
531 dma->device_free_chan_resources = moxart_free_chan_resources; 509 dma->device_free_chan_resources = moxart_free_chan_resources;
532 dma->device_issue_pending = moxart_issue_pending; 510 dma->device_issue_pending = moxart_issue_pending;
533 dma->device_tx_status = moxart_tx_status; 511 dma->device_tx_status = moxart_tx_status;
534 dma->device_control = moxart_control; 512 dma->device_config = moxart_slave_config;
513 dma->device_terminate_all = moxart_terminate_all;
535 dma->dev = dev; 514 dma->dev = dev;
536 515
537 INIT_LIST_HEAD(&dma->channels); 516 INIT_LIST_HEAD(&dma->channels);
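
[moxart needs no new functions at all: moxart_slave_config() already had the device_config signature, so the dispatcher is simply deleted and the pointers wired directly. One behavioral nuance, flagged as an assumption: DMA_PAUSE/DMA_RESUME used to return -EINVAL from moxart_control, whereas with device_pause/device_resume left NULL the core wrappers should report -ENOSYS instead.]

/* Wiring after the conversion (from the hunk above); the pause and
 * resume callbacks are intentionally left NULL. */
dma->device_config        = moxart_slave_config;
dma->device_terminate_all = moxart_terminate_all;
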
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 01bec4023de2..57d2457545f3 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -800,79 +800,69 @@ err_prep:
800 return NULL; 800 return NULL;
801} 801}
802 802
803static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 803static int mpc_dma_device_config(struct dma_chan *chan,
804 unsigned long arg) 804 struct dma_slave_config *cfg)
805{ 805{
806 struct mpc_dma_chan *mchan; 806 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
807 struct mpc_dma *mdma;
808 struct dma_slave_config *cfg;
809 unsigned long flags; 807 unsigned long flags;
810 808
811 mchan = dma_chan_to_mpc_dma_chan(chan); 809 /*
812 switch (cmd) { 810 * Software constraints:
813 case DMA_TERMINATE_ALL: 811 * - only transfers between a peripheral device and
814 /* Disable channel requests */ 812 * memory are supported;
815 mdma = dma_chan_to_mpc_dma(chan); 813 * - only peripheral devices with 4-byte FIFO access register
816 814 * are supported;
817 spin_lock_irqsave(&mchan->lock, flags); 815 * - minimal transfer chunk is 4 bytes and consequently
818 816 * source and destination addresses must be 4-byte aligned
819 out_8(&mdma->regs->dmacerq, chan->chan_id); 817 * and transfer size must be aligned on (4 * maxburst)
820 list_splice_tail_init(&mchan->prepared, &mchan->free); 818 * boundary;
821 list_splice_tail_init(&mchan->queued, &mchan->free); 819 * - during the transfer RAM address is being incremented by
822 list_splice_tail_init(&mchan->active, &mchan->free); 820 * the size of minimal transfer chunk;
823 821 * - peripheral port's address is constant during the transfer.
824 spin_unlock_irqrestore(&mchan->lock, flags); 822 */
825 823
826 return 0; 824 if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
825 cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
826 !IS_ALIGNED(cfg->src_addr, 4) ||
827 !IS_ALIGNED(cfg->dst_addr, 4)) {
828 return -EINVAL;
829 }
827 830
828 case DMA_SLAVE_CONFIG: 831 spin_lock_irqsave(&mchan->lock, flags);
829 /*
830 * Software constraints:
831 * - only transfers between a peripheral device and
832 * memory are supported;
833 * - only peripheral devices with 4-byte FIFO access register
834 * are supported;
835 * - minimal transfer chunk is 4 bytes and consequently
836 * source and destination addresses must be 4-byte aligned
837 * and transfer size must be aligned on (4 * maxburst)
838 * boundary;
839 * - during the transfer RAM address is being incremented by
840 * the size of minimal transfer chunk;
841 * - peripheral port's address is constant during the transfer.
842 */
843 832
844 cfg = (void *)arg; 833 mchan->src_per_paddr = cfg->src_addr;
834 mchan->src_tcd_nunits = cfg->src_maxburst;
835 mchan->dst_per_paddr = cfg->dst_addr;
836 mchan->dst_tcd_nunits = cfg->dst_maxburst;
845 837
846 if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || 838 /* Apply defaults */
847 cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || 839 if (mchan->src_tcd_nunits == 0)
848 !IS_ALIGNED(cfg->src_addr, 4) || 840 mchan->src_tcd_nunits = 1;
849 !IS_ALIGNED(cfg->dst_addr, 4)) { 841 if (mchan->dst_tcd_nunits == 0)
850 return -EINVAL; 842 mchan->dst_tcd_nunits = 1;
851 }
852 843
853 spin_lock_irqsave(&mchan->lock, flags); 844 spin_unlock_irqrestore(&mchan->lock, flags);
854 845
855 mchan->src_per_paddr = cfg->src_addr; 846 return 0;
856 mchan->src_tcd_nunits = cfg->src_maxburst; 847}
857 mchan->dst_per_paddr = cfg->dst_addr;
858 mchan->dst_tcd_nunits = cfg->dst_maxburst;
859 848
860 /* Apply defaults */ 849static int mpc_dma_device_terminate_all(struct dma_chan *chan)
861 if (mchan->src_tcd_nunits == 0) 850{
862 mchan->src_tcd_nunits = 1; 851 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
863 if (mchan->dst_tcd_nunits == 0) 852 struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
864 mchan->dst_tcd_nunits = 1; 853 unsigned long flags;
865 854
866 spin_unlock_irqrestore(&mchan->lock, flags); 855 /* Disable channel requests */
856 spin_lock_irqsave(&mchan->lock, flags);
867 857
868 return 0; 858 out_8(&mdma->regs->dmacerq, chan->chan_id);
859 list_splice_tail_init(&mchan->prepared, &mchan->free);
860 list_splice_tail_init(&mchan->queued, &mchan->free);
861 list_splice_tail_init(&mchan->active, &mchan->free);
869 862
870 default: 863 spin_unlock_irqrestore(&mchan->lock, flags);
871 /* Unknown command */
872 break;
873 }
874 864
875 return -ENXIO; 865 return 0;
876} 866}
877 867
878static int mpc_dma_probe(struct platform_device *op) 868static int mpc_dma_probe(struct platform_device *op)
@@ -963,7 +953,8 @@ static int mpc_dma_probe(struct platform_device *op)
963 dma->device_tx_status = mpc_dma_tx_status; 953 dma->device_tx_status = mpc_dma_tx_status;
964 dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy; 954 dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
965 dma->device_prep_slave_sg = mpc_dma_prep_slave_sg; 955 dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
966 dma->device_control = mpc_dma_device_control; 956 dma->device_config = mpc_dma_device_config;
957 dma->device_terminate_all = mpc_dma_device_terminate_all;
967 958
968 INIT_LIST_HEAD(&dma->channels); 959 INIT_LIST_HEAD(&dma->channels);
969 dma_cap_set(DMA_MEMCPY, dma->cap_mask); 960 dma_cap_set(DMA_MEMCPY, dma->cap_mask);
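
[In mpc512x the constraint checking moves ahead of the spinlock: an invalid config (non-4-byte widths or unaligned addresses) now fails before any channel state is touched.]

/* Fail fast, before taking mchan->lock (as in the hunk above). */
if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
    cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
    !IS_ALIGNED(cfg->src_addr, 4) ||
    !IS_ALIGNED(cfg->dst_addr, 4))
	return -EINVAL;
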
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index d7ac558c2c1c..b03e8137b918 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -928,14 +928,6 @@ out:
928 return err; 928 return err;
929} 929}
930 930
931/* This driver does not implement any of the optional DMA operations. */
932static int
933mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
934 unsigned long arg)
935{
936 return -ENOSYS;
937}
938
939static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan) 931static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
940{ 932{
941 struct dma_chan *chan, *_chan; 933 struct dma_chan *chan, *_chan;
@@ -1008,7 +1000,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
1008 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; 1000 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1009 dma_dev->device_tx_status = mv_xor_status; 1001 dma_dev->device_tx_status = mv_xor_status;
1010 dma_dev->device_issue_pending = mv_xor_issue_pending; 1002 dma_dev->device_issue_pending = mv_xor_issue_pending;
1011 dma_dev->device_control = mv_xor_control;
1012 dma_dev->dev = &pdev->dev; 1003 dma_dev->dev = &pdev->dev;
1013 1004
1014 /* set prep routines based on capability */ 1005 /* set prep routines based on capability */
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 5ea61201dbf0..829ec686dac3 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -202,8 +202,9 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
202 return container_of(chan, struct mxs_dma_chan, chan); 202 return container_of(chan, struct mxs_dma_chan, chan);
203} 203}
204 204
205static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) 205static void mxs_dma_reset_chan(struct dma_chan *chan)
206{ 206{
207 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
207 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 208 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
208 int chan_id = mxs_chan->chan.chan_id; 209 int chan_id = mxs_chan->chan.chan_id;
209 210
@@ -250,8 +251,9 @@ static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
250 mxs_chan->status = DMA_COMPLETE; 251 mxs_chan->status = DMA_COMPLETE;
251} 252}
252 253
253static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) 254static void mxs_dma_enable_chan(struct dma_chan *chan)
254{ 255{
256 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
255 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 257 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
256 int chan_id = mxs_chan->chan.chan_id; 258 int chan_id = mxs_chan->chan.chan_id;
257 259
@@ -272,13 +274,16 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
272 mxs_chan->reset = false; 274 mxs_chan->reset = false;
273} 275}
274 276
275static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) 277static void mxs_dma_disable_chan(struct dma_chan *chan)
276{ 278{
279 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
280
277 mxs_chan->status = DMA_COMPLETE; 281 mxs_chan->status = DMA_COMPLETE;
278} 282}
279 283
280static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) 284static int mxs_dma_pause_chan(struct dma_chan *chan)
281{ 285{
286 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
282 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 287 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
283 int chan_id = mxs_chan->chan.chan_id; 288 int chan_id = mxs_chan->chan.chan_id;
284 289
@@ -291,10 +296,12 @@ static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
291 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); 296 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
292 297
293 mxs_chan->status = DMA_PAUSED; 298 mxs_chan->status = DMA_PAUSED;
299 return 0;
294} 300}
295 301
296static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan) 302static int mxs_dma_resume_chan(struct dma_chan *chan)
297{ 303{
304 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
298 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 305 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
299 int chan_id = mxs_chan->chan.chan_id; 306 int chan_id = mxs_chan->chan.chan_id;
300 307
@@ -307,6 +314,7 @@ static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
307 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR); 314 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);
308 315
309 mxs_chan->status = DMA_IN_PROGRESS; 316 mxs_chan->status = DMA_IN_PROGRESS;
317 return 0;
310} 318}
311 319
312static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) 320static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -383,7 +391,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
383 "%s: error in channel %d\n", __func__, 391 "%s: error in channel %d\n", __func__,
384 chan); 392 chan);
385 mxs_chan->status = DMA_ERROR; 393 mxs_chan->status = DMA_ERROR;
386 mxs_dma_reset_chan(mxs_chan); 394 mxs_dma_reset_chan(&mxs_chan->chan);
387 } else if (mxs_chan->status != DMA_COMPLETE) { 395 } else if (mxs_chan->status != DMA_COMPLETE) {
388 if (mxs_chan->flags & MXS_DMA_SG_LOOP) { 396 if (mxs_chan->flags & MXS_DMA_SG_LOOP) {
389 mxs_chan->status = DMA_IN_PROGRESS; 397 mxs_chan->status = DMA_IN_PROGRESS;
@@ -432,7 +440,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
432 if (ret) 440 if (ret)
433 goto err_clk; 441 goto err_clk;
434 442
435 mxs_dma_reset_chan(mxs_chan); 443 mxs_dma_reset_chan(chan);
436 444
437 dma_async_tx_descriptor_init(&mxs_chan->desc, chan); 445 dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
438 mxs_chan->desc.tx_submit = mxs_dma_tx_submit; 446 mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
@@ -456,7 +464,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
456 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 464 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
457 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 465 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
458 466
459 mxs_dma_disable_chan(mxs_chan); 467 mxs_dma_disable_chan(chan);
460 468
461 free_irq(mxs_chan->chan_irq, mxs_dma); 469 free_irq(mxs_chan->chan_irq, mxs_dma);
462 470
@@ -651,28 +659,12 @@ err_out:
651 return NULL; 659 return NULL;
652} 660}
653 661
654static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 662static int mxs_dma_terminate_all(struct dma_chan *chan)
655 unsigned long arg)
656{ 663{
657 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 664 mxs_dma_reset_chan(chan);
658 int ret = 0; 665 mxs_dma_disable_chan(chan);
659
660 switch (cmd) {
661 case DMA_TERMINATE_ALL:
662 mxs_dma_reset_chan(mxs_chan);
663 mxs_dma_disable_chan(mxs_chan);
664 break;
665 case DMA_PAUSE:
666 mxs_dma_pause_chan(mxs_chan);
667 break;
668 case DMA_RESUME:
669 mxs_dma_resume_chan(mxs_chan);
670 break;
671 default:
672 ret = -ENOSYS;
673 }
674 666
675 return ret; 667 return 0;
676} 668}
677 669
678static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, 670static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
@@ -701,13 +693,6 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
701 return mxs_chan->status; 693 return mxs_chan->status;
702} 694}
703 695
704static void mxs_dma_issue_pending(struct dma_chan *chan)
705{
706 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
707
708 mxs_dma_enable_chan(mxs_chan);
709}
710
711static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) 696static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
712{ 697{
713 int ret; 698 int ret;
@@ -860,8 +845,14 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
860 mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status; 845 mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
861 mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg; 846 mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
862 mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic; 847 mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
863 mxs_dma->dma_device.device_control = mxs_dma_control; 848 mxs_dma->dma_device.device_pause = mxs_dma_pause_chan;
864 mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending; 849 mxs_dma->dma_device.device_resume = mxs_dma_resume_chan;
850 mxs_dma->dma_device.device_terminate_all = mxs_dma_terminate_all;
851 mxs_dma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
852 mxs_dma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
853 mxs_dma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
854 mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
855 mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan;
865 856
866 ret = dma_async_device_register(&mxs_dma->dma_device); 857 ret = dma_async_device_register(&mxs_dma->dma_device);
867 if (ret) { 858 if (ret) {
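
[mxs-dma also drops its trivial issue_pending wrapper, since mxs_dma_enable_chan() now has the right signature to be wired in directly, and starts advertising its transfer capabilities on struct dma_device. With these fields populated the core can answer dma_get_slave_caps() generically, which is presumably why the nbpfaxi diff below removes its hand-rolled nbpf_slave_caps().]

/* Capability advertisement added in the probe hunk above. */
mxs_dma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
mxs_dma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
mxs_dma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
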
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index d7d61e1a01c3..88b77c98365d 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -504,7 +504,7 @@ static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
504 * pauses DMA and reads out data received via DMA as well as those left 504 * pauses DMA and reads out data received via DMA as well as those left
505 * in the Rx FIFO. For this to work with the RAM side using burst 505 * in the Rx FIFO. For this to work with the RAM side using burst
506 * transfers we enable the SBE bit and terminate the transfer in our 506 * transfers we enable the SBE bit and terminate the transfer in our
507 * DMA_PAUSE handler. 507 * .device_pause handler.
508 */ 508 */
509 mem_xfer = nbpf_xfer_ds(chan->nbpf, size); 509 mem_xfer = nbpf_xfer_ds(chan->nbpf, size);
510 510
@@ -565,13 +565,6 @@ static void nbpf_configure(struct nbpf_device *nbpf)
565 nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT); 565 nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT);
566} 566}
567 567
568static void nbpf_pause(struct nbpf_channel *chan)
569{
570 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS);
571 /* See comment in nbpf_prep_one() */
572 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
573}
574
575/* Generic part */ 568/* Generic part */
576 569
577/* DMA ENGINE functions */ 570/* DMA ENGINE functions */
@@ -837,54 +830,58 @@ static void nbpf_chan_idle(struct nbpf_channel *chan)
837 } 830 }
838} 831}
839 832
840static int nbpf_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, 833static int nbpf_pause(struct dma_chan *dchan)
841 unsigned long arg)
842{ 834{
843 struct nbpf_channel *chan = nbpf_to_chan(dchan); 835 struct nbpf_channel *chan = nbpf_to_chan(dchan);
844 struct dma_slave_config *config;
845 836
846 dev_dbg(dchan->device->dev, "Entry %s(%d)\n", __func__, cmd); 837 dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
847 838
848 switch (cmd) { 839 chan->paused = true;
849 case DMA_TERMINATE_ALL: 840 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS);
850 dev_dbg(dchan->device->dev, "Terminating\n"); 841 /* See comment in nbpf_prep_one() */
851 nbpf_chan_halt(chan); 842 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
852 nbpf_chan_idle(chan);
853 break;
854 843
855 case DMA_SLAVE_CONFIG: 844 return 0;
856 if (!arg) 845}
857 return -EINVAL;
858 config = (struct dma_slave_config *)arg;
859 846
860 /* 847static int nbpf_terminate_all(struct dma_chan *dchan)
861 * We could check config->slave_id to match chan->terminal here, 848{
862 * but with DT they would be coming from the same source, so 849 struct nbpf_channel *chan = nbpf_to_chan(dchan);
863 * such a check would be superfluous
864 */
865 850
866 chan->slave_dst_addr = config->dst_addr; 851 dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
867 chan->slave_dst_width = nbpf_xfer_size(chan->nbpf, 852 dev_dbg(dchan->device->dev, "Terminating\n");
868 config->dst_addr_width, 1);
869 chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
870 config->dst_addr_width,
871 config->dst_maxburst);
872 chan->slave_src_addr = config->src_addr;
873 chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
874 config->src_addr_width, 1);
875 chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
876 config->src_addr_width,
877 config->src_maxburst);
878 break;
879 853
880 case DMA_PAUSE: 854 nbpf_chan_halt(chan);
881 chan->paused = true; 855 nbpf_chan_idle(chan);
882 nbpf_pause(chan);
883 break;
884 856
885 default: 857 return 0;
886 return -ENXIO; 858}
887 } 859
860static int nbpf_config(struct dma_chan *dchan,
861 struct dma_slave_config *config)
862{
863 struct nbpf_channel *chan = nbpf_to_chan(dchan);
864
865 dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
866
867 /*
868 * We could check config->slave_id to match chan->terminal here,
869 * but with DT they would be coming from the same source, so
870 * such a check would be superflous
871 */
872
873 chan->slave_dst_addr = config->dst_addr;
874 chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
875 config->dst_addr_width, 1);
876 chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
877 config->dst_addr_width,
878 config->dst_maxburst);
879 chan->slave_src_addr = config->src_addr;
880 chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
881 config->src_addr_width, 1);
882 chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
883 config->src_addr_width,
884 config->src_maxburst);
888 885
889 return 0; 886 return 0;
890} 887}
@@ -1072,18 +1069,6 @@ static void nbpf_free_chan_resources(struct dma_chan *dchan)
1072 } 1069 }
1073} 1070}
1074 1071
1075static int nbpf_slave_caps(struct dma_chan *dchan,
1076 struct dma_slave_caps *caps)
1077{
1078 caps->src_addr_widths = NBPF_DMA_BUSWIDTHS;
1079 caps->dstn_addr_widths = NBPF_DMA_BUSWIDTHS;
1080 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1081 caps->cmd_pause = false;
1082 caps->cmd_terminate = true;
1083
1084 return 0;
1085}
1086
1087static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec, 1072static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
1088 struct of_dma *ofdma) 1073 struct of_dma *ofdma)
1089{ 1074{
@@ -1414,7 +1399,6 @@ static int nbpf_probe(struct platform_device *pdev)
1414 dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy; 1399 dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy;
1415 dma_dev->device_tx_status = nbpf_tx_status; 1400 dma_dev->device_tx_status = nbpf_tx_status;
1416 dma_dev->device_issue_pending = nbpf_issue_pending; 1401 dma_dev->device_issue_pending = nbpf_issue_pending;
1417 dma_dev->device_slave_caps = nbpf_slave_caps;
1418 1402
1419 /* 1403 /*
1420 * If we drop support for unaligned MEMCPY buffer addresses and / or 1404 * If we drop support for unaligned MEMCPY buffer addresses and / or
@@ -1426,7 +1410,13 @@ static int nbpf_probe(struct platform_device *pdev)
1426 1410
1427 /* Compulsory for DMA_SLAVE fields */ 1411 /* Compulsory for DMA_SLAVE fields */
1428 dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg; 1412 dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg;
1429 dma_dev->device_control = nbpf_control; 1413 dma_dev->device_config = nbpf_config;
1414 dma_dev->device_pause = nbpf_pause;
1415 dma_dev->device_terminate_all = nbpf_terminate_all;
1416
1417 dma_dev->src_addr_widths = NBPF_DMA_BUSWIDTHS;
1418 dma_dev->dst_addr_widths = NBPF_DMA_BUSWIDTHS;
1419 dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1430 1420
1431 platform_set_drvdata(pdev, nbpf); 1421 platform_set_drvdata(pdev, nbpf);
1432 1422
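
Splitting nbpf_control() also removes the unsigned long round-trip: the old prototype forced every driver to cast arg back to a struct dma_slave_config *, while the replacement callbacks are typed. A before/after sketch of just the struct dma_device member prototypes (illustrative, condensed from this series):

/* old: one entry point, a command tag plus an untyped argument */
int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		      unsigned long arg);

/* new: one typed callback per operation, no casts and no switch */
int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config);
int (*device_pause)(struct dma_chan *chan);
int (*device_resume)(struct dma_chan *chan);
int (*device_terminate_all)(struct dma_chan *chan);
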
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index d5fbeaa1e7ba..ca31f1b45366 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -159,6 +159,10 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
159 return ERR_PTR(-ENODEV); 159 return ERR_PTR(-ENODEV);
160 } 160 }
161 161
162 /* Silently fail if there is not even the "dmas" property */
163 if (!of_find_property(np, "dmas", NULL))
164 return ERR_PTR(-ENODEV);
165
162 count = of_property_count_strings(np, "dma-names"); 166 count = of_property_count_strings(np, "dma-names");
163 if (count < 0) { 167 if (count < 0) {
164 pr_err("%s: dma-names property of node '%s' missing or empty\n", 168 pr_err("%s: dma-names property of node '%s' missing or empty\n",
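
The of-dma guard above makes a missing "dmas" property return -ENODEV quietly instead of logging an error about "dma-names", which matters for drivers that treat DMA as strictly optional. A hedged consumer-side sketch (the "tx" channel name is made up):

#include <linux/dmaengine.h>
#include <linux/err.h>

static int foo_attach_dma(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan = dma_request_slave_channel_reason(dev, "tx");

	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* provider not probed yet */
		chan = NULL;			/* no "dmas" property: fall back to PIO */
	}
	*out = chan;
	return 0;
}
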
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index c0016a68b446..7dd6dd121681 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -948,8 +948,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
948 return vchan_tx_prep(&c->vc, &d->vd, flags); 948 return vchan_tx_prep(&c->vc, &d->vd, flags);
949} 949}
950 950
951static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg) 951static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
952{ 952{
953 struct omap_chan *c = to_omap_dma_chan(chan);
954
953 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || 955 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
954 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 956 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
955 return -EINVAL; 957 return -EINVAL;
@@ -959,8 +961,9 @@ static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *c
959 return 0; 961 return 0;
960} 962}
961 963
962static int omap_dma_terminate_all(struct omap_chan *c) 964static int omap_dma_terminate_all(struct dma_chan *chan)
963{ 965{
966 struct omap_chan *c = to_omap_dma_chan(chan);
964 struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device); 967 struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
965 unsigned long flags; 968 unsigned long flags;
966 LIST_HEAD(head); 969 LIST_HEAD(head);
@@ -996,8 +999,10 @@ static int omap_dma_terminate_all(struct omap_chan *c)
996 return 0; 999 return 0;
997} 1000}
998 1001
999static int omap_dma_pause(struct omap_chan *c) 1002static int omap_dma_pause(struct dma_chan *chan)
1000{ 1003{
1004 struct omap_chan *c = to_omap_dma_chan(chan);
1005
1001 /* Pause/Resume only allowed with cyclic mode */ 1006 /* Pause/Resume only allowed with cyclic mode */
1002 if (!c->cyclic) 1007 if (!c->cyclic)
1003 return -EINVAL; 1008 return -EINVAL;
@@ -1010,8 +1015,10 @@ static int omap_dma_pause(struct omap_chan *c)
1010 return 0; 1015 return 0;
1011} 1016}
1012 1017
1013static int omap_dma_resume(struct omap_chan *c) 1018static int omap_dma_resume(struct dma_chan *chan)
1014{ 1019{
1020 struct omap_chan *c = to_omap_dma_chan(chan);
1021
1015 /* Pause/Resume only allowed with cyclic mode */ 1022 /* Pause/Resume only allowed with cyclic mode */
1016 if (!c->cyclic) 1023 if (!c->cyclic)
1017 return -EINVAL; 1024 return -EINVAL;
@@ -1029,37 +1036,6 @@ static int omap_dma_resume(struct omap_chan *c)
1029 return 0; 1036 return 0;
1030} 1037}
1031 1038
1032static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1033 unsigned long arg)
1034{
1035 struct omap_chan *c = to_omap_dma_chan(chan);
1036 int ret;
1037
1038 switch (cmd) {
1039 case DMA_SLAVE_CONFIG:
1040 ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
1041 break;
1042
1043 case DMA_TERMINATE_ALL:
1044 ret = omap_dma_terminate_all(c);
1045 break;
1046
1047 case DMA_PAUSE:
1048 ret = omap_dma_pause(c);
1049 break;
1050
1051 case DMA_RESUME:
1052 ret = omap_dma_resume(c);
1053 break;
1054
1055 default:
1056 ret = -ENXIO;
1057 break;
1058 }
1059
1060 return ret;
1061}
1062
1063static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig) 1039static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
1064{ 1040{
1065 struct omap_chan *c; 1041 struct omap_chan *c;
@@ -1094,19 +1070,6 @@ static void omap_dma_free(struct omap_dmadev *od)
1094 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 1070 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
1095 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) 1071 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
1096 1072
1097static int omap_dma_device_slave_caps(struct dma_chan *dchan,
1098 struct dma_slave_caps *caps)
1099{
1100 caps->src_addr_widths = OMAP_DMA_BUSWIDTHS;
1101 caps->dstn_addr_widths = OMAP_DMA_BUSWIDTHS;
1102 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1103 caps->cmd_pause = true;
1104 caps->cmd_terminate = true;
1105 caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1106
1107 return 0;
1108}
1109
1110static int omap_dma_probe(struct platform_device *pdev) 1073static int omap_dma_probe(struct platform_device *pdev)
1111{ 1074{
1112 struct omap_dmadev *od; 1075 struct omap_dmadev *od;
@@ -1136,8 +1099,14 @@ static int omap_dma_probe(struct platform_device *pdev)
1136 od->ddev.device_issue_pending = omap_dma_issue_pending; 1099 od->ddev.device_issue_pending = omap_dma_issue_pending;
1137 od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; 1100 od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
1138 od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; 1101 od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
1139 od->ddev.device_control = omap_dma_control; 1102 od->ddev.device_config = omap_dma_slave_config;
1140 od->ddev.device_slave_caps = omap_dma_device_slave_caps; 1103 od->ddev.device_pause = omap_dma_pause;
1104 od->ddev.device_resume = omap_dma_resume;
1105 od->ddev.device_terminate_all = omap_dma_terminate_all;
1106 od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
1107 od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
1108 od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1109 od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1141 od->ddev.dev = &pdev->dev; 1110 od->ddev.dev = &pdev->dev;
1142 INIT_LIST_HEAD(&od->ddev.channels); 1111 INIT_LIST_HEAD(&od->ddev.channels);
1143 INIT_LIST_HEAD(&od->pending); 1112 INIT_LIST_HEAD(&od->pending);
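
With omap-dma's .device_slave_caps gone, clients query capabilities through the core, which derives them from the dma_device fields set in probe above. Roughly:

static bool foo_can_pause(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	/* filled in by the core from src/dst_addr_widths, directions, ... */
	if (dma_get_slave_caps(chan, &caps))
		return false;

	return caps.cmd_pause;
}
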
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 6e0e47d76b23..35c143cb88da 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -665,16 +665,12 @@ err_desc_get:
665 return NULL; 665 return NULL;
666} 666}
667 667
668static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 668static int pd_device_terminate_all(struct dma_chan *chan)
669 unsigned long arg)
670{ 669{
671 struct pch_dma_chan *pd_chan = to_pd_chan(chan); 670 struct pch_dma_chan *pd_chan = to_pd_chan(chan);
672 struct pch_dma_desc *desc, *_d; 671 struct pch_dma_desc *desc, *_d;
673 LIST_HEAD(list); 672 LIST_HEAD(list);
674 673
675 if (cmd != DMA_TERMINATE_ALL)
676 return -ENXIO;
677
678 spin_lock_irq(&pd_chan->lock); 674 spin_lock_irq(&pd_chan->lock);
679 675
680 pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE); 676 pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);
@@ -932,7 +928,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
932 pd->dma.device_tx_status = pd_tx_status; 928 pd->dma.device_tx_status = pd_tx_status;
933 pd->dma.device_issue_pending = pd_issue_pending; 929 pd->dma.device_issue_pending = pd_issue_pending;
934 pd->dma.device_prep_slave_sg = pd_prep_slave_sg; 930 pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
935 pd->dma.device_control = pd_device_control; 931 pd->dma.device_terminate_all = pd_device_terminate_all;
936 932
937 err = dma_async_device_register(&pd->dma); 933 err = dma_async_device_register(&pd->dma);
938 if (err) { 934 if (err) {
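
pch_dma only ever honoured DMA_TERMINATE_ALL, so after the conversion it wires up nothing but .device_terminate_all; the other operations now fail in the core rather than in a per-driver default case. The framework wrapper reduces to roughly this (a sketch of the core side, not this driver):

static inline int dmaengine_pause(struct dma_chan *chan)
{
	if (chan->device->device_pause)
		return chan->device->device_pause(chan);

	return -ENOSYS;	/* driver did not provide the hook */
}
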
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index bdf40b530032..0e1f56772855 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -504,6 +504,9 @@ struct dma_pl330_desc {
504 504
505 enum desc_status status; 505 enum desc_status status;
506 506
507 int bytes_requested;
508 bool last;
509
507 /* The channel which currently holds this desc */ 510 /* The channel which currently holds this desc */
508 struct dma_pl330_chan *pchan; 511 struct dma_pl330_chan *pchan;
509 512
@@ -1048,6 +1051,10 @@ static bool _trigger(struct pl330_thread *thrd)
1048 if (!req) 1051 if (!req)
1049 return true; 1052 return true;
1050 1053
1054 /* Return if req is running */
1055 if (idx == thrd->req_running)
1056 return true;
1057
1051 desc = req->desc; 1058 desc = req->desc;
1052 1059
1053 ns = desc->rqcfg.nonsecure ? 1 : 0; 1060 ns = desc->rqcfg.nonsecure ? 1 : 0;
@@ -1587,6 +1594,8 @@ static int pl330_update(struct pl330_dmac *pl330)
1587 descdone = thrd->req[active].desc; 1594 descdone = thrd->req[active].desc;
1588 thrd->req[active].desc = NULL; 1595 thrd->req[active].desc = NULL;
1589 1596
1597 thrd->req_running = -1;
1598
1590 /* Get going again ASAP */ 1599 /* Get going again ASAP */
1591 _start(thrd); 1600 _start(thrd);
1592 1601
@@ -2086,77 +2095,89 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
2086 return 1; 2095 return 1;
2087} 2096}
2088 2097
2089static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) 2098static int pl330_config(struct dma_chan *chan,
2099 struct dma_slave_config *slave_config)
2100{
2101 struct dma_pl330_chan *pch = to_pchan(chan);
2102
2103 if (slave_config->direction == DMA_MEM_TO_DEV) {
2104 if (slave_config->dst_addr)
2105 pch->fifo_addr = slave_config->dst_addr;
2106 if (slave_config->dst_addr_width)
2107 pch->burst_sz = __ffs(slave_config->dst_addr_width);
2108 if (slave_config->dst_maxburst)
2109 pch->burst_len = slave_config->dst_maxburst;
2110 } else if (slave_config->direction == DMA_DEV_TO_MEM) {
2111 if (slave_config->src_addr)
2112 pch->fifo_addr = slave_config->src_addr;
2113 if (slave_config->src_addr_width)
2114 pch->burst_sz = __ffs(slave_config->src_addr_width);
2115 if (slave_config->src_maxburst)
2116 pch->burst_len = slave_config->src_maxburst;
2117 }
2118
2119 return 0;
2120}
2121
2122static int pl330_terminate_all(struct dma_chan *chan)
2090{ 2123{
2091 struct dma_pl330_chan *pch = to_pchan(chan); 2124 struct dma_pl330_chan *pch = to_pchan(chan);
2092 struct dma_pl330_desc *desc; 2125 struct dma_pl330_desc *desc;
2093 unsigned long flags; 2126 unsigned long flags;
2094 struct pl330_dmac *pl330 = pch->dmac; 2127 struct pl330_dmac *pl330 = pch->dmac;
2095 struct dma_slave_config *slave_config;
2096 LIST_HEAD(list); 2128 LIST_HEAD(list);
2097 2129
2098 switch (cmd) { 2130 spin_lock_irqsave(&pch->lock, flags);
2099 case DMA_TERMINATE_ALL: 2131 spin_lock(&pl330->lock);
2100 pm_runtime_get_sync(pl330->ddma.dev); 2132 _stop(pch->thread);
2101 spin_lock_irqsave(&pch->lock, flags); 2133 spin_unlock(&pl330->lock);
2134
2135 pch->thread->req[0].desc = NULL;
2136 pch->thread->req[1].desc = NULL;
2137 pch->thread->req_running = -1;
2138
2139 /* Mark all desc done */
2140 list_for_each_entry(desc, &pch->submitted_list, node) {
2141 desc->status = FREE;
2142 dma_cookie_complete(&desc->txd);
2143 }
2102 2144
2103 spin_lock(&pl330->lock); 2145 list_for_each_entry(desc, &pch->work_list , node) {
2104 _stop(pch->thread); 2146 desc->status = FREE;
2105 spin_unlock(&pl330->lock); 2147 dma_cookie_complete(&desc->txd);
2148 }
2106 2149
2107 pch->thread->req[0].desc = NULL; 2150 list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
2108 pch->thread->req[1].desc = NULL; 2151 list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
2109 pch->thread->req_running = -1; 2152 list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
2153 spin_unlock_irqrestore(&pch->lock, flags);
2110 2154
2111 /* Mark all desc done */ 2155 return 0;
2112 list_for_each_entry(desc, &pch->submitted_list, node) { 2156}
2113 desc->status = FREE;
2114 dma_cookie_complete(&desc->txd);
2115 }
2116 2157
2117 list_for_each_entry(desc, &pch->work_list , node) { 2158/*
2118 		desc->status = FREE;			 2159 * We don't support the DMA_RESUME command because of hardware
2119 		dma_cookie_complete(&desc->txd);	 2160 * limitations, so after pausing the channel we cannot restore
2120 	}					 2161 * it to active state. We have to terminate the channel and set up
						 2162 * the DMA transfer again. This pause feature was implemented to
						 2163 * allow safely reading the residue before channel termination.
2164 */
2165int pl330_pause(struct dma_chan *chan)
2166{
2167 struct dma_pl330_chan *pch = to_pchan(chan);
2168 struct pl330_dmac *pl330 = pch->dmac;
2169 unsigned long flags;
2121 2170
2122 list_for_each_entry(desc, &pch->completed_list , node) { 2171 pm_runtime_get_sync(pl330->ddma.dev);
2123 desc->status = FREE; 2172 spin_lock_irqsave(&pch->lock, flags);
2124 dma_cookie_complete(&desc->txd);
2125 }
2126 2173
2127 if (!list_empty(&pch->work_list)) 2174 spin_lock(&pl330->lock);
2128 pm_runtime_put(pl330->ddma.dev); 2175 _stop(pch->thread);
2176 spin_unlock(&pl330->lock);
2129 2177
2130 list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool); 2178 spin_unlock_irqrestore(&pch->lock, flags);
2131 list_splice_tail_init(&pch->work_list, &pl330->desc_pool); 2179 pm_runtime_mark_last_busy(pl330->ddma.dev);
2132 list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); 2180 pm_runtime_put_autosuspend(pl330->ddma.dev);
2133 spin_unlock_irqrestore(&pch->lock, flags);
2134 pm_runtime_mark_last_busy(pl330->ddma.dev);
2135 pm_runtime_put_autosuspend(pl330->ddma.dev);
2136 break;
2137 case DMA_SLAVE_CONFIG:
2138 slave_config = (struct dma_slave_config *)arg;
2139
2140 if (slave_config->direction == DMA_MEM_TO_DEV) {
2141 if (slave_config->dst_addr)
2142 pch->fifo_addr = slave_config->dst_addr;
2143 if (slave_config->dst_addr_width)
2144 pch->burst_sz = __ffs(slave_config->dst_addr_width);
2145 if (slave_config->dst_maxburst)
2146 pch->burst_len = slave_config->dst_maxburst;
2147 } else if (slave_config->direction == DMA_DEV_TO_MEM) {
2148 if (slave_config->src_addr)
2149 pch->fifo_addr = slave_config->src_addr;
2150 if (slave_config->src_addr_width)
2151 pch->burst_sz = __ffs(slave_config->src_addr_width);
2152 if (slave_config->src_maxburst)
2153 pch->burst_len = slave_config->src_maxburst;
2154 }
2155 break;
2156 default:
2157 dev_err(pch->dmac->ddma.dev, "Not supported command.\n");
2158 return -ENXIO;
2159 }
2160 2181
2161 return 0; 2182 return 0;
2162} 2183}
@@ -2182,11 +2203,74 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
2182 pm_runtime_put_autosuspend(pch->dmac->ddma.dev); 2203 pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
2183} 2204}
2184 2205
2206int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
2207 struct dma_pl330_desc *desc)
2208{
2209 struct pl330_thread *thrd = pch->thread;
2210 struct pl330_dmac *pl330 = pch->dmac;
2211 void __iomem *regs = thrd->dmac->base;
2212 u32 val, addr;
2213
2214 pm_runtime_get_sync(pl330->ddma.dev);
2215 val = addr = 0;
2216 if (desc->rqcfg.src_inc) {
2217 val = readl(regs + SA(thrd->id));
2218 addr = desc->px.src_addr;
2219 } else {
2220 val = readl(regs + DA(thrd->id));
2221 addr = desc->px.dst_addr;
2222 }
2223 pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
2224 pm_runtime_put_autosuspend(pl330->ddma.dev);
2225 return val - addr;
2226}
2227
2185static enum dma_status 2228static enum dma_status
2186pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, 2229pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
2187 struct dma_tx_state *txstate) 2230 struct dma_tx_state *txstate)
2188{ 2231{
2189 return dma_cookie_status(chan, cookie, txstate); 2232 enum dma_status ret;
2233 unsigned long flags;
2234 struct dma_pl330_desc *desc, *running = NULL;
2235 struct dma_pl330_chan *pch = to_pchan(chan);
2236 unsigned int transferred, residual = 0;
2237
2238 ret = dma_cookie_status(chan, cookie, txstate);
2239
2240 if (!txstate)
2241 return ret;
2242
2243 if (ret == DMA_COMPLETE)
2244 goto out;
2245
2246 spin_lock_irqsave(&pch->lock, flags);
2247
2248 if (pch->thread->req_running != -1)
2249 running = pch->thread->req[pch->thread->req_running].desc;
2250
2251 /* Check in pending list */
2252 list_for_each_entry(desc, &pch->work_list, node) {
2253 if (desc->status == DONE)
2254 transferred = desc->bytes_requested;
2255 else if (running && desc == running)
2256 transferred =
2257 pl330_get_current_xferred_count(pch, desc);
2258 else
2259 transferred = 0;
2260 residual += desc->bytes_requested - transferred;
2261 if (desc->txd.cookie == cookie) {
2262 ret = desc->status;
2263 break;
2264 }
2265 if (desc->last)
2266 residual = 0;
2267 }
2268 spin_unlock_irqrestore(&pch->lock, flags);
2269
2270out:
2271 dma_set_residue(txstate, residual);
2272
2273 return ret;
2190} 2274}
2191 2275
2192static void pl330_issue_pending(struct dma_chan *chan) 2276static void pl330_issue_pending(struct dma_chan *chan)
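
The new pl330_tx_status() walks the work list and accumulates bytes_requested minus what each descriptor has already moved. A worked example with assumed numbers: queue three 4096-byte descriptors where the first is DONE and the second is running with 1024 bytes transferred, then query the third one's cookie (cookie_of_desc2 is hypothetical):

	/*
	 * desc0 (DONE):    4096 - 4096 =    0
	 * desc1 (running): 4096 - 1024 = 3072
	 * desc2 (queued):  4096 -    0 = 4096
	 *                        total = 7168 bytes outstanding
	 */
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie_of_desc2, &state);
	/* state.residue == 7168 in the scenario above */
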
@@ -2231,12 +2315,14 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2231 desc->txd.callback = last->txd.callback; 2315 desc->txd.callback = last->txd.callback;
2232 desc->txd.callback_param = last->txd.callback_param; 2316 desc->txd.callback_param = last->txd.callback_param;
2233 } 2317 }
2318 last->last = false;
2234 2319
2235 dma_cookie_assign(&desc->txd); 2320 dma_cookie_assign(&desc->txd);
2236 2321
2237 list_move_tail(&desc->node, &pch->submitted_list); 2322 list_move_tail(&desc->node, &pch->submitted_list);
2238 } 2323 }
2239 2324
2325 last->last = true;
2240 cookie = dma_cookie_assign(&last->txd); 2326 cookie = dma_cookie_assign(&last->txd);
2241 list_add_tail(&last->node, &pch->submitted_list); 2327 list_add_tail(&last->node, &pch->submitted_list);
2242 spin_unlock_irqrestore(&pch->lock, flags); 2328 spin_unlock_irqrestore(&pch->lock, flags);
@@ -2459,6 +2545,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
2459 desc->rqtype = direction; 2545 desc->rqtype = direction;
2460 desc->rqcfg.brst_size = pch->burst_sz; 2546 desc->rqcfg.brst_size = pch->burst_sz;
2461 desc->rqcfg.brst_len = 1; 2547 desc->rqcfg.brst_len = 1;
2548 desc->bytes_requested = period_len;
2462 fill_px(&desc->px, dst, src, period_len); 2549 fill_px(&desc->px, dst, src, period_len);
2463 2550
2464 if (!first) 2551 if (!first)
@@ -2601,6 +2688,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2601 desc->rqcfg.brst_size = pch->burst_sz; 2688 desc->rqcfg.brst_size = pch->burst_sz;
2602 desc->rqcfg.brst_len = 1; 2689 desc->rqcfg.brst_len = 1;
2603 desc->rqtype = direction; 2690 desc->rqtype = direction;
2691 desc->bytes_requested = sg_dma_len(sg);
2604 } 2692 }
2605 2693
2606 /* Return the last desc in the chain */ 2694 /* Return the last desc in the chain */
@@ -2623,19 +2711,6 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
2623 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ 2711 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
2624 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) 2712 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
2625 2713
2626static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
2627 struct dma_slave_caps *caps)
2628{
2629 caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
2630 caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
2631 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2632 caps->cmd_pause = false;
2633 caps->cmd_terminate = true;
2634 caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
2635
2636 return 0;
2637}
2638
2639/* 2714/*
2640 * Runtime PM callbacks are provided by amba/bus.c driver. 2715 * Runtime PM callbacks are provided by amba/bus.c driver.
2641 * 2716 *
@@ -2793,9 +2868,14 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2793 pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic; 2868 pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
2794 pd->device_tx_status = pl330_tx_status; 2869 pd->device_tx_status = pl330_tx_status;
2795 pd->device_prep_slave_sg = pl330_prep_slave_sg; 2870 pd->device_prep_slave_sg = pl330_prep_slave_sg;
2796 pd->device_control = pl330_control; 2871 pd->device_config = pl330_config;
2872 pd->device_pause = pl330_pause;
2873 pd->device_terminate_all = pl330_terminate_all;
2797 pd->device_issue_pending = pl330_issue_pending; 2874 pd->device_issue_pending = pl330_issue_pending;
2798 pd->device_slave_caps = pl330_dma_device_slave_caps; 2875 pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
2876 pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
2877 pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2878 pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2799 2879
2800 ret = dma_async_device_register(pd); 2880 ret = dma_async_device_register(pd);
2801 if (ret) { 2881 if (ret) {
@@ -2847,7 +2927,7 @@ probe_err3:
2847 2927
2848 /* Flush the channel */ 2928 /* Flush the channel */
2849 if (pch->thread) { 2929 if (pch->thread) {
2850 pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); 2930 pl330_terminate_all(&pch->chan);
2851 pl330_free_chan_resources(&pch->chan); 2931 pl330_free_chan_resources(&pch->chan);
2852 } 2932 }
2853 } 2933 }
@@ -2878,7 +2958,7 @@ static int pl330_remove(struct amba_device *adev)
2878 2958
2879 /* Flush the channel */ 2959 /* Flush the channel */
2880 if (pch->thread) { 2960 if (pch->thread) {
2881 pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); 2961 pl330_terminate_all(&pch->chan);
2882 pl330_free_chan_resources(&pch->chan); 2962 pl330_free_chan_resources(&pch->chan);
2883 } 2963 }
2884 } 2964 }
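
As the comment before pl330_pause() explains, the controller cannot resume, so the only safe client sequence is pause, read the now-stable residue, then terminate and re-submit. A hedged sketch of that flow:

	struct dma_tx_state state;

	dmaengine_pause(chan);				/* stops the pl330 thread */
	dmaengine_tx_status(chan, cookie, &state);	/* residue no longer moving */
	dmaengine_terminate_all(chan);			/* mandatory: pl330 has no resume */
	/* set up and submit a fresh transfer if the work must continue */
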
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c
index 3122a99ec06b..d7a33b3ac466 100644
--- a/drivers/dma/qcom_bam_dma.c
+++ b/drivers/dma/qcom_bam_dma.c
@@ -530,11 +530,18 @@ static void bam_free_chan(struct dma_chan *chan)
530 * Sets slave configuration for channel 530 * Sets slave configuration for channel
531 * 531 *
532 */ 532 */
533static void bam_slave_config(struct bam_chan *bchan, 533static int bam_slave_config(struct dma_chan *chan,
534 struct dma_slave_config *cfg) 534 struct dma_slave_config *cfg)
535{ 535{
536 struct bam_chan *bchan = to_bam_chan(chan);
537 unsigned long flag;
538
539 spin_lock_irqsave(&bchan->vc.lock, flag);
536 memcpy(&bchan->slave, cfg, sizeof(*cfg)); 540 memcpy(&bchan->slave, cfg, sizeof(*cfg));
537 bchan->reconfigure = 1; 541 bchan->reconfigure = 1;
542 spin_unlock_irqrestore(&bchan->vc.lock, flag);
543
544 return 0;
538} 545}
539 546
540/** 547/**
@@ -627,8 +634,9 @@ err_out:
627 * No callbacks are done 634 * No callbacks are done
628 * 635 *
629 */ 636 */
630static void bam_dma_terminate_all(struct bam_chan *bchan) 637static int bam_dma_terminate_all(struct dma_chan *chan)
631{ 638{
639 struct bam_chan *bchan = to_bam_chan(chan);
632 unsigned long flag; 640 unsigned long flag;
633 LIST_HEAD(head); 641 LIST_HEAD(head);
634 642
@@ -643,56 +651,46 @@ static void bam_dma_terminate_all(struct bam_chan *bchan)
643 spin_unlock_irqrestore(&bchan->vc.lock, flag); 651 spin_unlock_irqrestore(&bchan->vc.lock, flag);
644 652
645 vchan_dma_desc_free_list(&bchan->vc, &head); 653 vchan_dma_desc_free_list(&bchan->vc, &head);
654
655 return 0;
646} 656}
647 657
648/** 658/**
649 * bam_control - DMA device control 659 * bam_pause - Pause DMA channel
650 * @chan: dma channel 660 * @chan: dma channel
651 * @cmd: control cmd
652 * @arg: cmd argument
653 * 661 *
654 * Perform DMA control command 662 */
663static int bam_pause(struct dma_chan *chan)
664{
665 struct bam_chan *bchan = to_bam_chan(chan);
666 struct bam_device *bdev = bchan->bdev;
667 unsigned long flag;
668
669 spin_lock_irqsave(&bchan->vc.lock, flag);
670 writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
671 bchan->paused = 1;
672 spin_unlock_irqrestore(&bchan->vc.lock, flag);
673
674 return 0;
675}
676
677/**
678 * bam_resume - Resume DMA channel operations
679 * @chan: dma channel
655 * 680 *
656 */ 681 */
657static int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 682static int bam_resume(struct dma_chan *chan)
658 unsigned long arg)
659{ 683{
660 struct bam_chan *bchan = to_bam_chan(chan); 684 struct bam_chan *bchan = to_bam_chan(chan);
661 struct bam_device *bdev = bchan->bdev; 685 struct bam_device *bdev = bchan->bdev;
662 int ret = 0;
663 unsigned long flag; 686 unsigned long flag;
664 687
665 switch (cmd) { 688 spin_lock_irqsave(&bchan->vc.lock, flag);
666 case DMA_PAUSE: 689 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
667 spin_lock_irqsave(&bchan->vc.lock, flag); 690 bchan->paused = 0;
668 writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT)); 691 spin_unlock_irqrestore(&bchan->vc.lock, flag);
669 bchan->paused = 1;
670 spin_unlock_irqrestore(&bchan->vc.lock, flag);
671 break;
672
673 case DMA_RESUME:
674 spin_lock_irqsave(&bchan->vc.lock, flag);
675 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
676 bchan->paused = 0;
677 spin_unlock_irqrestore(&bchan->vc.lock, flag);
678 break;
679
680 case DMA_TERMINATE_ALL:
681 bam_dma_terminate_all(bchan);
682 break;
683
684 case DMA_SLAVE_CONFIG:
685 spin_lock_irqsave(&bchan->vc.lock, flag);
686 bam_slave_config(bchan, (struct dma_slave_config *)arg);
687 spin_unlock_irqrestore(&bchan->vc.lock, flag);
688 break;
689
690 default:
691 ret = -ENXIO;
692 break;
693 }
694 692
695 return ret; 693 return 0;
696} 694}
697 695
698/** 696/**
@@ -1148,7 +1146,10 @@ static int bam_dma_probe(struct platform_device *pdev)
1148 bdev->common.device_alloc_chan_resources = bam_alloc_chan; 1146 bdev->common.device_alloc_chan_resources = bam_alloc_chan;
1149 bdev->common.device_free_chan_resources = bam_free_chan; 1147 bdev->common.device_free_chan_resources = bam_free_chan;
1150 bdev->common.device_prep_slave_sg = bam_prep_slave_sg; 1148 bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
1151 bdev->common.device_control = bam_control; 1149 bdev->common.device_config = bam_slave_config;
1150 bdev->common.device_pause = bam_pause;
1151 bdev->common.device_resume = bam_resume;
1152 bdev->common.device_terminate_all = bam_dma_terminate_all;
1152 bdev->common.device_issue_pending = bam_issue_pending; 1153 bdev->common.device_issue_pending = bam_issue_pending;
1153 bdev->common.device_tx_status = bam_tx_status; 1154 bdev->common.device_tx_status = bam_tx_status;
1154 bdev->common.dev = bdev->dev; 1155 bdev->common.dev = bdev->dev;
@@ -1187,7 +1188,7 @@ static int bam_dma_remove(struct platform_device *pdev)
1187 devm_free_irq(bdev->dev, bdev->irq, bdev); 1188 devm_free_irq(bdev->dev, bdev->irq, bdev);
1188 1189
1189 for (i = 0; i < bdev->num_channels; i++) { 1190 for (i = 0; i < bdev->num_channels; i++) {
1190 bam_dma_terminate_all(&bdev->channels[i]); 1191 bam_dma_terminate_all(&bdev->channels[i].vc.chan);
1191 tasklet_kill(&bdev->channels[i].vc.task); 1192 tasklet_kill(&bdev->channels[i].vc.task);
1192 1193
1193 dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, 1194 dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
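
Because every new callback receives the generic struct dma_chan *, each driver converts back with container_of, which is also why bam_dma_remove() above now passes &bdev->channels[i].vc.chan instead of the bam_chan itself. The idiom, sketched with hypothetical foo names:

#include <linux/dmaengine.h>
#include "virt-dma.h"	/* drivers/dma internal header, as used by the drivers above */

struct foo_chan {
	struct virt_dma_chan vc;	/* embeds struct dma_chan at vc.chan */
	/* driver-private state ... */
};

static inline struct foo_chan *to_foo_chan(struct dma_chan *common)
{
	return container_of(common, struct foo_chan, vc.chan);
}
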
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 6941a77521c3..2f91da3db836 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -384,20 +384,30 @@ static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
384 return tc * txd->width; 384 return tc * txd->width;
385} 385}
386 386
387static int s3c24xx_dma_set_runtime_config(struct s3c24xx_dma_chan *s3cchan, 387static int s3c24xx_dma_set_runtime_config(struct dma_chan *chan,
388 struct dma_slave_config *config) 388 struct dma_slave_config *config)
389{ 389{
390 if (!s3cchan->slave) 390 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
391 return -EINVAL; 391 unsigned long flags;
392 int ret = 0;
392 393
393 /* Reject definitely invalid configurations */ 394 /* Reject definitely invalid configurations */
394 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || 395 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
395 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 396 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
396 return -EINVAL; 397 return -EINVAL;
397 398
399 spin_lock_irqsave(&s3cchan->vc.lock, flags);
400
401 if (!s3cchan->slave) {
402 ret = -EINVAL;
403 goto out;
404 }
405
398 s3cchan->cfg = *config; 406 s3cchan->cfg = *config;
399 407
400 return 0; 408out:
409 spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
410 return ret;
401} 411}
402 412
403/* 413/*
@@ -703,8 +713,7 @@ static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
703 * The DMA ENGINE API 713 * The DMA ENGINE API
704 */ 714 */
705 715
706static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 716static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
707 unsigned long arg)
708{ 717{
709 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); 718 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
710 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; 719 struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
@@ -713,40 +722,28 @@ static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
713 722
714 spin_lock_irqsave(&s3cchan->vc.lock, flags); 723 spin_lock_irqsave(&s3cchan->vc.lock, flags);
715 724
716 switch (cmd) { 725 if (!s3cchan->phy && !s3cchan->at) {
717 case DMA_SLAVE_CONFIG: 726 dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
718 ret = s3c24xx_dma_set_runtime_config(s3cchan, 727 s3cchan->id);
719 (struct dma_slave_config *)arg); 728 ret = -EINVAL;
720 break; 729 goto unlock;
721 case DMA_TERMINATE_ALL: 730 }
722 if (!s3cchan->phy && !s3cchan->at) {
723 dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
724 s3cchan->id);
725 ret = -EINVAL;
726 break;
727 }
728 731
729 s3cchan->state = S3C24XX_DMA_CHAN_IDLE; 732 s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
730 733
731 /* Mark physical channel as free */ 734 /* Mark physical channel as free */
732 if (s3cchan->phy) 735 if (s3cchan->phy)
733 s3c24xx_dma_phy_free(s3cchan); 736 s3c24xx_dma_phy_free(s3cchan);
734 737
735 /* Dequeue current job */ 738 /* Dequeue current job */
736 if (s3cchan->at) { 739 if (s3cchan->at) {
737 s3c24xx_dma_desc_free(&s3cchan->at->vd); 740 s3c24xx_dma_desc_free(&s3cchan->at->vd);
738 s3cchan->at = NULL; 741 s3cchan->at = NULL;
739 }
740
741 /* Dequeue jobs not yet fired as well */
742 s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
743 break;
744 default:
745 /* Unknown command */
746 ret = -ENXIO;
747 break;
748 } 742 }
749 743
744 /* Dequeue jobs not yet fired as well */
745 s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
746unlock:
750 spin_unlock_irqrestore(&s3cchan->vc.lock, flags); 747 spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
751 748
752 return ret; 749 return ret;
@@ -1300,7 +1297,8 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
1300 s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy; 1297 s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
1301 s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status; 1298 s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status;
1302 s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending; 1299 s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
1303 s3cdma->memcpy.device_control = s3c24xx_dma_control; 1300 s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
1301 s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;
1304 1302
1305 /* Initialize slave engine for SoC internal dedicated peripherals */ 1303 /* Initialize slave engine for SoC internal dedicated peripherals */
1306 dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask); 1304 dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
@@ -1315,7 +1313,8 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
1315 s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending; 1313 s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
1316 s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg; 1314 s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
1317 s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic; 1315 s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
1318 s3cdma->slave.device_control = s3c24xx_dma_control; 1316 s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
1317 s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;
1319 1318
1320 /* Register as many memcpy channels as there are physical channels */ 1319 /* Register as many memcpy channels as there are physical channels */
1321 ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy, 1320 ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,
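
Note the split in s3c24xx_dma_set_runtime_config() above: the buswidth sanity check, which depends only on the caller's arguments, stays outside the lock, while the slave check and the config copy, which touch channel state, move inside it. The same pattern as a generic sketch (foo_chan is hypothetical):

#include <linux/dmaengine.h>
#include <linux/spinlock.h>

struct foo_chan {
	spinlock_t lock;
	bool slave;
	struct dma_slave_config cfg;
};

static int foo_config(struct foo_chan *fc, struct dma_slave_config *cfg)
{
	unsigned long flags;
	int ret = 0;

	/* argument-only validation: no channel state touched, no lock needed */
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	spin_lock_irqsave(&fc->lock, flags);
	if (!fc->slave)		/* channel state: must be read under the lock */
		ret = -EINVAL;
	else
		fc->cfg = *cfg;
	spin_unlock_irqrestore(&fc->lock, flags);

	return ret;
}
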
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 96bb62c39c41..5adf5407a8cb 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -669,8 +669,10 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
669 return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 669 return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
670} 670}
671 671
672static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg) 672static int sa11x0_dma_device_config(struct dma_chan *chan,
673 struct dma_slave_config *cfg)
673{ 674{
675 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
674 u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW); 676 u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
675 dma_addr_t addr; 677 dma_addr_t addr;
676 enum dma_slave_buswidth width; 678 enum dma_slave_buswidth width;
@@ -704,99 +706,101 @@ static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_c
704 return 0; 706 return 0;
705} 707}
706 708
707static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 709static int sa11x0_dma_device_pause(struct dma_chan *chan)
708 unsigned long arg)
709{ 710{
710 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 711 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
711 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); 712 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
712 struct sa11x0_dma_phy *p; 713 struct sa11x0_dma_phy *p;
713 LIST_HEAD(head); 714 LIST_HEAD(head);
714 unsigned long flags; 715 unsigned long flags;
715 int ret;
716 716
717 switch (cmd) { 717 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
718 case DMA_SLAVE_CONFIG: 718 spin_lock_irqsave(&c->vc.lock, flags);
719 return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg); 719 if (c->status == DMA_IN_PROGRESS) {
720 720 c->status = DMA_PAUSED;
721 case DMA_TERMINATE_ALL:
722 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
723 /* Clear the tx descriptor lists */
724 spin_lock_irqsave(&c->vc.lock, flags);
725 vchan_get_all_descriptors(&c->vc, &head);
726 721
727 p = c->phy; 722 p = c->phy;
728 if (p) { 723 if (p) {
729 dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num); 724 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
730 /* vchan is assigned to a pchan - stop the channel */ 725 } else {
731 writel(DCSR_RUN | DCSR_IE |
732 DCSR_STRTA | DCSR_DONEA |
733 DCSR_STRTB | DCSR_DONEB,
734 p->base + DMA_DCSR_C);
735
736 if (p->txd_load) {
737 if (p->txd_load != p->txd_done)
738 list_add_tail(&p->txd_load->vd.node, &head);
739 p->txd_load = NULL;
740 }
741 if (p->txd_done) {
742 list_add_tail(&p->txd_done->vd.node, &head);
743 p->txd_done = NULL;
744 }
745 c->phy = NULL;
746 spin_lock(&d->lock); 726 spin_lock(&d->lock);
747 p->vchan = NULL; 727 list_del_init(&c->node);
748 spin_unlock(&d->lock); 728 spin_unlock(&d->lock);
749 tasklet_schedule(&d->task);
750 } 729 }
751 spin_unlock_irqrestore(&c->vc.lock, flags); 730 }
752 vchan_dma_desc_free_list(&c->vc, &head); 731 spin_unlock_irqrestore(&c->vc.lock, flags);
753 ret = 0;
754 break;
755 732
756 case DMA_PAUSE: 733 return 0;
757 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); 734}
758 spin_lock_irqsave(&c->vc.lock, flags);
759 if (c->status == DMA_IN_PROGRESS) {
760 c->status = DMA_PAUSED;
761 735
762 p = c->phy; 736static int sa11x0_dma_device_resume(struct dma_chan *chan)
763 if (p) { 737{
764 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C); 738 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
765 } else { 739 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
766 spin_lock(&d->lock); 740 struct sa11x0_dma_phy *p;
767 list_del_init(&c->node); 741 LIST_HEAD(head);
768 spin_unlock(&d->lock); 742 unsigned long flags;
769 }
770 }
771 spin_unlock_irqrestore(&c->vc.lock, flags);
772 ret = 0;
773 break;
774 743
775 case DMA_RESUME: 744 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
776 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); 745 spin_lock_irqsave(&c->vc.lock, flags);
777 spin_lock_irqsave(&c->vc.lock, flags); 746 if (c->status == DMA_PAUSED) {
778 if (c->status == DMA_PAUSED) { 747 c->status = DMA_IN_PROGRESS;
779 c->status = DMA_IN_PROGRESS; 748
780 749 p = c->phy;
781 p = c->phy; 750 if (p) {
782 if (p) { 751 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
783 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S); 752 } else if (!list_empty(&c->vc.desc_issued)) {
784 } else if (!list_empty(&c->vc.desc_issued)) { 753 spin_lock(&d->lock);
785 spin_lock(&d->lock); 754 list_add_tail(&c->node, &d->chan_pending);
786 list_add_tail(&c->node, &d->chan_pending); 755 spin_unlock(&d->lock);
787 spin_unlock(&d->lock);
788 }
789 } 756 }
790 spin_unlock_irqrestore(&c->vc.lock, flags); 757 }
791 ret = 0; 758 spin_unlock_irqrestore(&c->vc.lock, flags);
792 break;
793 759
794 default: 760 return 0;
795 ret = -ENXIO; 761}
796 break; 762
763static int sa11x0_dma_device_terminate_all(struct dma_chan *chan)
764{
765 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
766 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
767 struct sa11x0_dma_phy *p;
768 LIST_HEAD(head);
769 unsigned long flags;
770
771 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
772 /* Clear the tx descriptor lists */
773 spin_lock_irqsave(&c->vc.lock, flags);
774 vchan_get_all_descriptors(&c->vc, &head);
775
776 p = c->phy;
777 if (p) {
778 dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
779 /* vchan is assigned to a pchan - stop the channel */
780 writel(DCSR_RUN | DCSR_IE |
781 DCSR_STRTA | DCSR_DONEA |
782 DCSR_STRTB | DCSR_DONEB,
783 p->base + DMA_DCSR_C);
784
785 if (p->txd_load) {
786 if (p->txd_load != p->txd_done)
787 list_add_tail(&p->txd_load->vd.node, &head);
788 p->txd_load = NULL;
789 }
790 if (p->txd_done) {
791 list_add_tail(&p->txd_done->vd.node, &head);
792 p->txd_done = NULL;
793 }
794 c->phy = NULL;
795 spin_lock(&d->lock);
796 p->vchan = NULL;
797 spin_unlock(&d->lock);
798 tasklet_schedule(&d->task);
797 } 799 }
800 spin_unlock_irqrestore(&c->vc.lock, flags);
801 vchan_dma_desc_free_list(&c->vc, &head);
798 802
799 return ret; 803 return 0;
800} 804}
801 805
802struct sa11x0_dma_channel_desc { 806struct sa11x0_dma_channel_desc {
@@ -833,7 +837,10 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
833 dmadev->dev = dev; 837 dmadev->dev = dev;
834 dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources; 838 dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
835 dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources; 839 dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
836 dmadev->device_control = sa11x0_dma_control; 840 dmadev->device_config = sa11x0_dma_device_config;
841 dmadev->device_pause = sa11x0_dma_device_pause;
842 dmadev->device_resume = sa11x0_dma_device_resume;
843 dmadev->device_terminate_all = sa11x0_dma_device_terminate_all;
837 dmadev->device_tx_status = sa11x0_dma_tx_status; 844 dmadev->device_tx_status = sa11x0_dma_tx_status;
838 dmadev->device_issue_pending = sa11x0_dma_issue_pending; 845 dmadev->device_issue_pending = sa11x0_dma_issue_pending;
839 846
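
sa11x0's pause and resume differ only in which half of the DCSR set/clear register pair they poke: writing a mask to DMA_DCSR_C clears those control bits and writing the same mask to DMA_DCSR_S sets them, so neither path needs a read-modify-write. Folded into one helper (a sketch; the constants and struct sa11x0_dma_phy come from the driver above):

static void sa11x0_chan_run(struct sa11x0_dma_phy *p, bool run)
{
	/* same mask, different half of the set/clear pair */
	writel(DCSR_RUN | DCSR_IE,
	       p->base + (run ? DMA_DCSR_S : DMA_DCSR_C));
}
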
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 0349125a2e20..8190ad225a1b 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -2,6 +2,10 @@
2# DMA engine configuration for sh 2# DMA engine configuration for sh
3# 3#
4 4
5config RENESAS_DMA
6 bool
7 select DMA_ENGINE
8
5# 9#
6# DMA Engine Helpers 10# DMA Engine Helpers
7# 11#
@@ -12,7 +16,7 @@ config SH_DMAE_BASE
12 depends on !SUPERH || SH_DMA 16 depends on !SUPERH || SH_DMA
13 depends on !SH_DMA_API 17 depends on !SH_DMA_API
14 default y 18 default y
15 select DMA_ENGINE 19 select RENESAS_DMA
16 help 20 help
17 Enable support for the Renesas SuperH DMA controllers. 21 Enable support for the Renesas SuperH DMA controllers.
18 22
@@ -52,3 +56,11 @@ config RCAR_AUDMAC_PP
52 depends on SH_DMAE_BASE 56 depends on SH_DMAE_BASE
53 help 57 help
54 Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers. 58 Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers.
59
60config RCAR_DMAC
61 tristate "Renesas R-Car Gen2 DMA Controller"
62 depends on ARCH_SHMOBILE || COMPILE_TEST
63 select RENESAS_DMA
64 help
65 This driver supports the general purpose DMA controller found in the
66 Renesas R-Car second generation SoCs.
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index 0a5cfdb76e45..2852f9db61a4 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_SH_DMAE) += shdma.o
16obj-$(CONFIG_SUDMAC) += sudmac.o 16obj-$(CONFIG_SUDMAC) += sudmac.o
17obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o 17obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
18obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o 18obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o
19obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
new file mode 100644
index 000000000000..a18d16cc4795
--- /dev/null
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -0,0 +1,1770 @@
1/*
2 * Renesas R-Car Gen2 DMA Controller Driver
3 *
4 * Copyright (C) 2014 Renesas Electronics Inc.
5 *
6 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
7 *
8 * This is free software; you can redistribute it and/or modify
9 * it under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/dma-mapping.h>
14#include <linux/dmaengine.h>
15#include <linux/interrupt.h>
16#include <linux/list.h>
17#include <linux/module.h>
18#include <linux/mutex.h>
19#include <linux/of.h>
20#include <linux/of_dma.h>
21#include <linux/of_platform.h>
22#include <linux/platform_device.h>
23#include <linux/pm_runtime.h>
24#include <linux/slab.h>
25#include <linux/spinlock.h>
26
27#include "../dmaengine.h"
28
29/*
30 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
31 * @node: entry in the parent's chunks list
32 * @src_addr: device source address
33 * @dst_addr: device destination address
34 * @size: transfer size in bytes
35 */
36struct rcar_dmac_xfer_chunk {
37 struct list_head node;
38
39 dma_addr_t src_addr;
40 dma_addr_t dst_addr;
41 u32 size;
42};
43
44/*
45 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
46 * @sar: value of the SAR register (source address)
47 * @dar: value of the DAR register (destination address)
48 * @tcr: value of the TCR register (transfer count)
49 */
50struct rcar_dmac_hw_desc {
51 u32 sar;
52 u32 dar;
53 u32 tcr;
54 u32 reserved;
55} __attribute__((__packed__));
56
57/*
58 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
59 * @async_tx: base DMA asynchronous transaction descriptor
60 * @direction: direction of the DMA transfer
61 * @xfer_shift: log2 of the transfer size
62 * @chcr: value of the channel configuration register for this transfer
63 * @node: entry in the channel's descriptors lists
64 * @chunks: list of transfer chunks for this transfer
65 * @running: the transfer chunk being currently processed
66 * @nchunks: number of transfer chunks for this transfer
67 * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
68 * @hwdescs.mem: hardware descriptors memory for the transfer
69 * @hwdescs.dma: device address of the hardware descriptors memory
70 * @hwdescs.size: size of the hardware descriptors in bytes
71 * @size: transfer size in bytes
72 * @cyclic: when set indicates that the DMA transfer is cyclic
73 */
74struct rcar_dmac_desc {
75 struct dma_async_tx_descriptor async_tx;
76 enum dma_transfer_direction direction;
77 unsigned int xfer_shift;
78 u32 chcr;
79
80 struct list_head node;
81 struct list_head chunks;
82 struct rcar_dmac_xfer_chunk *running;
83 unsigned int nchunks;
84
85 struct {
86 bool use;
87 struct rcar_dmac_hw_desc *mem;
88 dma_addr_t dma;
89 size_t size;
90 } hwdescs;
91
92 unsigned int size;
93 bool cyclic;
94};
95
96#define to_rcar_dmac_desc(d) container_of(d, struct rcar_dmac_desc, async_tx)
97
98/*
99 * struct rcar_dmac_desc_page - One page worth of descriptors
100 * @node: entry in the channel's pages list
101 * @descs: array of DMA descriptors
102 * @chunks: array of transfer chunk descriptors
103 */
104struct rcar_dmac_desc_page {
105 struct list_head node;
106
107 union {
108 struct rcar_dmac_desc descs[0];
109 struct rcar_dmac_xfer_chunk chunks[0];
110 };
111};
112
113#define RCAR_DMAC_DESCS_PER_PAGE \
114 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) / \
115 sizeof(struct rcar_dmac_desc))
116#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE \
117 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
118 sizeof(struct rcar_dmac_xfer_chunk))
119
120/*
121 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
122 * @chan: base DMA channel object
123 * @iomem: channel I/O memory base
124 * @index: index of this channel in the controller
125 * @src_xfer_size: size (in bytes) of hardware transfers on the source side
126 * @dst_xfer_size: size (in bytes) of hardware transfers on the destination side
127 * @src_slave_addr: slave source memory address
128 * @dst_slave_addr: slave destination memory address
129 * @mid_rid: hardware MID/RID for the DMA client using this channel
130 * @lock: protects the channel CHCR register and the desc members
131 * @desc.free: list of free descriptors
132 * @desc.pending: list of pending descriptors (submitted with tx_submit)
133 * @desc.active: list of active descriptors (activated with issue_pending)
134 * @desc.done: list of completed descriptors
135 * @desc.wait: list of descriptors waiting for an ack
136 * @desc.running: the descriptor being processed (a member of the active list)
137 * @desc.chunks_free: list of free transfer chunk descriptors
138 * @desc.pages: list of pages used by allocated descriptors
139 */
140struct rcar_dmac_chan {
141 struct dma_chan chan;
142 void __iomem *iomem;
143 unsigned int index;
144
145 unsigned int src_xfer_size;
146 unsigned int dst_xfer_size;
147 dma_addr_t src_slave_addr;
148 dma_addr_t dst_slave_addr;
149 int mid_rid;
150
151 spinlock_t lock;
152
153 struct {
154 struct list_head free;
155 struct list_head pending;
156 struct list_head active;
157 struct list_head done;
158 struct list_head wait;
159 struct rcar_dmac_desc *running;
160
161 struct list_head chunks_free;
162
163 struct list_head pages;
164 } desc;
165};
166
167#define to_rcar_dmac_chan(c) container_of(c, struct rcar_dmac_chan, chan)
168
169/*
170 * struct rcar_dmac - R-Car Gen2 DMA Controller
171 * @engine: base DMA engine object
172 * @dev: the hardware device
173 * @iomem: remapped I/O memory base
174 * @n_channels: number of available channels
175 * @channels: array of DMAC channels
176 * @modules: bitmask of client modules in use
177 */
178struct rcar_dmac {
179 struct dma_device engine;
180 struct device *dev;
181 void __iomem *iomem;
182
183 unsigned int n_channels;
184 struct rcar_dmac_chan *channels;
185
186 unsigned long modules[256 / BITS_PER_LONG];
187};
188
189#define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine)
190
191/* -----------------------------------------------------------------------------
192 * Registers
193 */
194
195#define RCAR_DMAC_CHAN_OFFSET(i) (0x8000 + 0x80 * (i))
196
197#define RCAR_DMAISTA 0x0020
198#define RCAR_DMASEC 0x0030
199#define RCAR_DMAOR 0x0060
200#define RCAR_DMAOR_PRI_FIXED (0 << 8)
201#define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8)
202#define RCAR_DMAOR_AE (1 << 2)
203#define RCAR_DMAOR_DME (1 << 0)
204#define RCAR_DMACHCLR 0x0080
205#define RCAR_DMADPSEC 0x00a0
206
207#define RCAR_DMASAR 0x0000
208#define RCAR_DMADAR 0x0004
209#define RCAR_DMATCR 0x0008
210#define RCAR_DMATCR_MASK 0x00ffffff
211#define RCAR_DMATSR 0x0028
212#define RCAR_DMACHCR 0x000c
213#define RCAR_DMACHCR_CAE (1 << 31)
214#define RCAR_DMACHCR_CAIE (1 << 30)
215#define RCAR_DMACHCR_DPM_DISABLED (0 << 28)
216#define RCAR_DMACHCR_DPM_ENABLED (1 << 28)
217#define RCAR_DMACHCR_DPM_REPEAT (2 << 28)
218#define RCAR_DMACHCR_DPM_INFINITE (3 << 28)
219#define RCAR_DMACHCR_RPT_SAR (1 << 27)
220#define RCAR_DMACHCR_RPT_DAR (1 << 26)
221#define RCAR_DMACHCR_RPT_TCR (1 << 25)
222#define RCAR_DMACHCR_DPB (1 << 22)
223#define RCAR_DMACHCR_DSE (1 << 19)
224#define RCAR_DMACHCR_DSIE (1 << 18)
225#define RCAR_DMACHCR_TS_1B ((0 << 20) | (0 << 3))
226#define RCAR_DMACHCR_TS_2B ((0 << 20) | (1 << 3))
227#define RCAR_DMACHCR_TS_4B ((0 << 20) | (2 << 3))
228#define RCAR_DMACHCR_TS_16B ((0 << 20) | (3 << 3))
229#define RCAR_DMACHCR_TS_32B ((1 << 20) | (0 << 3))
230#define RCAR_DMACHCR_TS_64B ((1 << 20) | (1 << 3))
231#define RCAR_DMACHCR_TS_8B ((1 << 20) | (3 << 3))
232#define RCAR_DMACHCR_DM_FIXED (0 << 14)
233#define RCAR_DMACHCR_DM_INC (1 << 14)
234#define RCAR_DMACHCR_DM_DEC (2 << 14)
235#define RCAR_DMACHCR_SM_FIXED (0 << 12)
236#define RCAR_DMACHCR_SM_INC (1 << 12)
237#define RCAR_DMACHCR_SM_DEC (2 << 12)
238#define RCAR_DMACHCR_RS_AUTO (4 << 8)
239#define RCAR_DMACHCR_RS_DMARS (8 << 8)
240#define RCAR_DMACHCR_IE (1 << 2)
241#define RCAR_DMACHCR_TE (1 << 1)
242#define RCAR_DMACHCR_DE (1 << 0)
243#define RCAR_DMATCRB 0x0018
244#define RCAR_DMATSRB 0x0038
245#define RCAR_DMACHCRB 0x001c
246#define RCAR_DMACHCRB_DCNT(n) ((n) << 24)
247#define RCAR_DMACHCRB_DPTR_MASK (0xff << 16)
248#define RCAR_DMACHCRB_DPTR_SHIFT 16
249#define RCAR_DMACHCRB_DRST (1 << 15)
250#define RCAR_DMACHCRB_DTS (1 << 8)
251#define RCAR_DMACHCRB_SLM_NORMAL (0 << 4)
252#define RCAR_DMACHCRB_SLM_CLK(n) ((8 | (n)) << 4)
253#define RCAR_DMACHCRB_PRI(n) ((n) << 0)
254#define RCAR_DMARS 0x0040
255#define RCAR_DMABUFCR 0x0048
256#define RCAR_DMABUFCR_MBU(n) ((n) << 16)
257#define RCAR_DMABUFCR_ULB(n) ((n) << 0)
258#define RCAR_DMADPBASE 0x0050
259#define RCAR_DMADPBASE_MASK 0xfffffff0
260#define RCAR_DMADPBASE_SEL (1 << 0)
261#define RCAR_DMADPCR 0x0054
262#define RCAR_DMADPCR_DIPT(n) ((n) << 24)
263#define RCAR_DMAFIXSAR 0x0010
264#define RCAR_DMAFIXDAR 0x0014
265#define RCAR_DMAFIXDPBASE 0x0060
266
267/* Hardcode the MEMCPY transfer size to 4 bytes. */
268#define RCAR_DMAC_MEMCPY_XFER_SIZE 4
269
270/* -----------------------------------------------------------------------------
271 * Device access
272 */
273
274static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
275{
276 if (reg == RCAR_DMAOR)
277 writew(data, dmac->iomem + reg);
278 else
279 writel(data, dmac->iomem + reg);
280}
281
282static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
283{
284 if (reg == RCAR_DMAOR)
285 return readw(dmac->iomem + reg);
286 else
287 return readl(dmac->iomem + reg);
288}
289
290static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
291{
292 if (reg == RCAR_DMARS)
293 return readw(chan->iomem + reg);
294 else
295 return readl(chan->iomem + reg);
296}
297
298static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
299{
300 if (reg == RCAR_DMARS)
301 writew(data, chan->iomem + reg);
302 else
303 writel(data, chan->iomem + reg);
304}
305
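
The helpers above hide the one quirk of the register map: DMAOR and DMARS are 16-bit while everything else is 32-bit, so callers never pick readw/readl themselves. A read-modify-write of DMAOR through them looks like this (sketch, using the constants defined above):

	/* e.g. setting the global DME bit without touching the priority mode */
	u16 dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);

	rcar_dmac_write(dmac, RCAR_DMAOR, dmaor | RCAR_DMAOR_DME);
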
306/* -----------------------------------------------------------------------------
307 * Initialization and configuration
308 */
309
310static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
311{
312 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
313
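	/* DE set with TE still clear means a transfer is in progress. */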
314 return (chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)) == RCAR_DMACHCR_DE;
315}
316
317static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
318{
319 struct rcar_dmac_desc *desc = chan->desc.running;
320 u32 chcr = desc->chcr;
321
322 WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));
323
324 if (chan->mid_rid >= 0)
325 rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);
326
327 if (desc->hwdescs.use) {
328 struct rcar_dmac_xfer_chunk *chunk;
329
330 dev_dbg(chan->chan.device->dev,
331 "chan%u: queue desc %p: %u@%pad\n",
332 chan->index, desc, desc->nchunks, &desc->hwdescs.dma);
333
334#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
335 rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
336 desc->hwdescs.dma >> 32);
337#endif
338 rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
339 (desc->hwdescs.dma & 0xfffffff0) |
340 RCAR_DMADPBASE_SEL);
341 rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
342 RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
343 RCAR_DMACHCRB_DRST);
344
345 /*
346 * Errata: When descriptor memory is accessed through an IOMMU
347 * the DMAC doesn't initialize the DMADAR register automatically
348 * from the first descriptor at the beginning of the transfer as
349 * it should. Initialize it manually with the destination address
350 * of the first chunk.
351 */
352 chunk = list_first_entry(&desc->chunks,
353 struct rcar_dmac_xfer_chunk, node);
354 rcar_dmac_chan_write(chan, RCAR_DMADAR,
355 chunk->dst_addr & 0xffffffff);
356
357 /*
358 * Program the descriptor stage interrupt to occur after the end
359 * of the first stage.
360 */
361 rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));
362
363 chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
364 | RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;
365
366 /*
367 * If the descriptor isn't cyclic, enable normal descriptor mode
368 * and the transfer completion interrupt.
369 */
370 if (!desc->cyclic)
371 chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
372 /*
373 * If the descriptor is cyclic and has a callback, enable the
374 * descriptor stage interrupt in infinite repeat mode.
375 */
376 else if (desc->async_tx.callback)
377 chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
378 /*
379 * Otherwise just select infinite repeat mode without any
380 * interrupt.
381 */
382 else
383 chcr |= RCAR_DMACHCR_DPM_INFINITE;
384 } else {
385 struct rcar_dmac_xfer_chunk *chunk = desc->running;
386
387 dev_dbg(chan->chan.device->dev,
388 "chan%u: queue chunk %p: %u@%pad -> %pad\n",
389 chan->index, chunk, chunk->size, &chunk->src_addr,
390 &chunk->dst_addr);
391
392#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
393 rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
394 chunk->src_addr >> 32);
395 rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
396 chunk->dst_addr >> 32);
397#endif
398 rcar_dmac_chan_write(chan, RCAR_DMASAR,
399 chunk->src_addr & 0xffffffff);
400 rcar_dmac_chan_write(chan, RCAR_DMADAR,
401 chunk->dst_addr & 0xffffffff);
402 rcar_dmac_chan_write(chan, RCAR_DMATCR,
403 chunk->size >> desc->xfer_shift);
404
405 chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
406 }
407
408 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE);
409}
410
411static int rcar_dmac_init(struct rcar_dmac *dmac)
412{
413 u16 dmaor;
414
415 /* Clear all channels and enable the DMAC globally. */
416 rcar_dmac_write(dmac, RCAR_DMACHCLR, 0x7fff);
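	/* Writing 1 to bits 0-14 of DMACHCLR clears the corresponding channels. */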
417 rcar_dmac_write(dmac, RCAR_DMAOR,
418 RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
419
420 dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
421 if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
422 dev_warn(dmac->dev, "DMAOR initialization failed.\n");
423 return -EIO;
424 }
425
426 return 0;
427}
428
429/* -----------------------------------------------------------------------------
430 * Descriptor submission
431 */
432
433static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
434{
435 struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
436 struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
437 unsigned long flags;
438 dma_cookie_t cookie;
439
440 spin_lock_irqsave(&chan->lock, flags);
441
442 cookie = dma_cookie_assign(tx);
443
444 dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
445 chan->index, tx->cookie, desc);
446
447 list_add_tail(&desc->node, &chan->desc.pending);
448 desc->running = list_first_entry(&desc->chunks,
449 struct rcar_dmac_xfer_chunk, node);
450
451 spin_unlock_irqrestore(&chan->lock, flags);
452
453 return cookie;
454}
455
456/* -----------------------------------------------------------------------------
457 * Descriptor allocation and freeing
458 */
459
460/*
461 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
462 * @chan: the DMA channel
463 * @gfp: allocation flags
464 */
465static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
466{
467 struct rcar_dmac_desc_page *page;
468 LIST_HEAD(list);
469 unsigned int i;
470
471 page = (void *)get_zeroed_page(gfp);
472 if (!page)
473 return -ENOMEM;
474
475 for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
476 struct rcar_dmac_desc *desc = &page->descs[i];
477
478 dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
479 desc->async_tx.tx_submit = rcar_dmac_tx_submit;
480 INIT_LIST_HEAD(&desc->chunks);
481
482 list_add_tail(&desc->node, &list);
483 }
484
485 spin_lock_irq(&chan->lock);
486 list_splice_tail(&list, &chan->desc.free);
487 list_add_tail(&page->node, &chan->desc.pages);
488 spin_unlock_irq(&chan->lock);
489
490 return 0;
491}
492
493/*
494 * rcar_dmac_desc_put - Release a DMA transfer descriptor
495 * @chan: the DMA channel
496 * @desc: the descriptor
497 *
498 * Put the descriptor and its transfer chunk descriptors back in the channel's
499 * free descriptor lists. The descriptor's chunks list will be reinitialized to
500 * an empty list as a result.
501 *
502 * The descriptor must have been removed from the channel's lists before calling
503 * this function.
504 */
505static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
506 struct rcar_dmac_desc *desc)
507{
508 unsigned long flags;
509
510 spin_lock_irqsave(&chan->lock, flags);
511 list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
512 list_add_tail(&desc->node, &chan->desc.free);
513 spin_unlock_irqrestore(&chan->lock, flags);
514}
515
516static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
517{
518 struct rcar_dmac_desc *desc, *_desc;
519 LIST_HEAD(list);
520
521 /*
522 * We have to temporarily move all descriptors from the wait list to a
523 * local list as iterating over the wait list, even with
524 * list_for_each_entry_safe, isn't safe if we release the channel lock
525 * around the rcar_dmac_desc_put() call.
526 */
527 spin_lock_irq(&chan->lock);
528 list_splice_init(&chan->desc.wait, &list);
529 spin_unlock_irq(&chan->lock);
530
531 list_for_each_entry_safe(desc, _desc, &list, node) {
532 if (async_tx_test_ack(&desc->async_tx)) {
533 list_del(&desc->node);
534 rcar_dmac_desc_put(chan, desc);
535 }
536 }
537
538 if (list_empty(&list))
539 return;
540
541 /* Put the remaining descriptors back in the wait list. */
542 spin_lock_irq(&chan->lock);
543 list_splice(&list, &chan->desc.wait);
544 spin_unlock_irq(&chan->lock);
545}
546
547/*
548 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
549 * @chan: the DMA channel
550 *
551 * Locking: This function must be called in a non-atomic context.
552 *
553 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
554 * be allocated.
555 */
556static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
557{
558 struct rcar_dmac_desc *desc;
559 int ret;
560
561 /* Recycle acked descriptors before attempting allocation. */
562 rcar_dmac_desc_recycle_acked(chan);
563
564 spin_lock_irq(&chan->lock);
565
566 while (list_empty(&chan->desc.free)) {
567 /*
568 * No free descriptors: allocate a page worth of them and try
569 * again, as someone else could race us to grab the newly
570 * allocated descriptors. If the allocation fails, return an
571 * error.
572 */
573 spin_unlock_irq(&chan->lock);
574 ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
575 if (ret < 0)
576 return NULL;
577 spin_lock_irq(&chan->lock);
578 }
579
580 desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
581 list_del(&desc->node);
582
583 spin_unlock_irq(&chan->lock);
584
585 return desc;
586}
587
588/*
589 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
590 * @chan: the DMA channel
591 * @gfp: allocation flags
592 */
593static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
594{
595 struct rcar_dmac_desc_page *page;
596 LIST_HEAD(list);
597 unsigned int i;
598
599 page = (void *)get_zeroed_page(gfp);
600 if (!page)
601 return -ENOMEM;
602
603 for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
604 struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];
605
606 list_add_tail(&chunk->node, &list);
607 }
608
609 spin_lock_irq(&chan->lock);
610 list_splice_tail(&list, &chan->desc.chunks_free);
611 list_add_tail(&page->node, &chan->desc.pages);
612 spin_unlock_irq(&chan->lock);
613
614 return 0;
615}
616
617/*
618 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
619 * @chan: the DMA channel
620 *
621 * Locking: This function must be called in a non-atomic context.
622 *
623 * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
624 * descriptor can be allocated.
625 */
626static struct rcar_dmac_xfer_chunk *
627rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
628{
629 struct rcar_dmac_xfer_chunk *chunk;
630 int ret;
631
632 spin_lock_irq(&chan->lock);
633
634 while (list_empty(&chan->desc.chunks_free)) {
635 /*
636 * No free descriptors: allocate a page worth of them and try
637 * again, as someone else could race us to grab the newly
638 * allocated descriptors. If the allocation fails, return an
639 * error.
640 */
641 spin_unlock_irq(&chan->lock);
642 ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
643 if (ret < 0)
644 return NULL;
645 spin_lock_irq(&chan->lock);
646 }
647
648 chunk = list_first_entry(&chan->desc.chunks_free,
649 struct rcar_dmac_xfer_chunk, node);
650 list_del(&chunk->node);
651
652 spin_unlock_irq(&chan->lock);
653
654 return chunk;
655}
656
657static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
658 struct rcar_dmac_desc *desc, size_t size)
659{
660 /*
661 * dma_alloc_coherent() allocates memory in page size increments. To
662 * avoid reallocating the hardware descriptors when the allocated size
663 * wouldn't change, align the requested size to a multiple of the
664 * page size.
665 */
666 size = PAGE_ALIGN(size);
667
668 if (desc->hwdescs.size == size)
669 return;
670
671 if (desc->hwdescs.mem) {
672 dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size,
673 desc->hwdescs.mem, desc->hwdescs.dma);
674 desc->hwdescs.mem = NULL;
675 desc->hwdescs.size = 0;
676 }
677
678 if (!size)
679 return;
680
681 desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size,
682 &desc->hwdescs.dma, GFP_NOWAIT);
683 if (!desc->hwdescs.mem)
684 return;
685
686 desc->hwdescs.size = size;
687}
688
689static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
690 struct rcar_dmac_desc *desc)
691{
692 struct rcar_dmac_xfer_chunk *chunk;
693 struct rcar_dmac_hw_desc *hwdesc;
694
695 rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));
696
697 hwdesc = desc->hwdescs.mem;
698 if (!hwdesc)
699 return -ENOMEM;
700
701 list_for_each_entry(chunk, &desc->chunks, node) {
702 hwdesc->sar = chunk->src_addr;
703 hwdesc->dar = chunk->dst_addr;
704 hwdesc->tcr = chunk->size >> desc->xfer_shift;
705 hwdesc++;
706 }
707
708 return 0;
709}
710
711/* -----------------------------------------------------------------------------
712 * Stop and reset
713 */
714
715static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
716{
717 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
718
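	/* Clear DE to stop the channel along with its interrupt enables and flags. */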
719 chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
720 RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
721 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
722}
723
724static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
725{
726 struct rcar_dmac_desc *desc, *_desc;
727 unsigned long flags;
728 LIST_HEAD(descs);
729
730 spin_lock_irqsave(&chan->lock, flags);
731
732 /* Move all non-free descriptors to the local lists. */
733 list_splice_init(&chan->desc.pending, &descs);
734 list_splice_init(&chan->desc.active, &descs);
735 list_splice_init(&chan->desc.done, &descs);
736 list_splice_init(&chan->desc.wait, &descs);
737
738 chan->desc.running = NULL;
739
740 spin_unlock_irqrestore(&chan->lock, flags);
741
742 list_for_each_entry_safe(desc, _desc, &descs, node) {
743 list_del(&desc->node);
744 rcar_dmac_desc_put(chan, desc);
745 }
746}
747
748static void rcar_dmac_stop(struct rcar_dmac *dmac)
749{
750 rcar_dmac_write(dmac, RCAR_DMAOR, 0);
751}
752
753static void rcar_dmac_abort(struct rcar_dmac *dmac)
754{
755 unsigned int i;
756
757 /* Stop all channels. */
758 for (i = 0; i < dmac->n_channels; ++i) {
759 struct rcar_dmac_chan *chan = &dmac->channels[i];
760
761 /* Stop and reinitialize the channel. */
762 spin_lock(&chan->lock);
763 rcar_dmac_chan_halt(chan);
764 spin_unlock(&chan->lock);
765
766 rcar_dmac_chan_reinit(chan);
767 }
768}
769
770/* -----------------------------------------------------------------------------
771 * Descriptor preparation
772 */
773
774static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
775 struct rcar_dmac_desc *desc)
776{
777 static const u32 chcr_ts[] = {
778 RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
779 RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
780 RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
781 RCAR_DMACHCR_TS_64B,
782 };
783
784 unsigned int xfer_size;
785 u32 chcr;
786
787 switch (desc->direction) {
788 case DMA_DEV_TO_MEM:
789 chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
790 | RCAR_DMACHCR_RS_DMARS;
791 xfer_size = chan->src_xfer_size;
792 break;
793
794 case DMA_MEM_TO_DEV:
795 chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
796 | RCAR_DMACHCR_RS_DMARS;
797 xfer_size = chan->dst_xfer_size;
798 break;
799
800 case DMA_MEM_TO_MEM:
801 default:
802 chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
803 | RCAR_DMACHCR_RS_AUTO;
804 xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
805 break;
806 }
807
808 desc->xfer_shift = ilog2(xfer_size);
809 desc->chcr = chcr | chcr_ts[desc->xfer_shift];
810}
811
812/*
813 * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
814 *
815 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
816 * converted to scatter-gather to guarantee consistent locking and correct
817 * list manipulation. For slave DMA the direction carries its usual meaning:
818 * the SG list describes RAM and the addr variable holds the slave address,
819 * e.g. a FIFO I/O register. For MEMCPY the direction equals DMA_MEM_TO_MEM
820 * and the SG list contains a single element pointing at the source buffer.
821 */
822static struct dma_async_tx_descriptor *
823rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
824 unsigned int sg_len, dma_addr_t dev_addr,
825 enum dma_transfer_direction dir, unsigned long dma_flags,
826 bool cyclic)
827{
828 struct rcar_dmac_xfer_chunk *chunk;
829 struct rcar_dmac_desc *desc;
830 struct scatterlist *sg;
831 unsigned int nchunks = 0;
832 unsigned int max_chunk_size;
833 unsigned int full_size = 0;
834 bool highmem = false;
835 unsigned int i;
836
837 desc = rcar_dmac_desc_get(chan);
838 if (!desc)
839 return NULL;
840
841 desc->async_tx.flags = dma_flags;
842 desc->async_tx.cookie = -EBUSY;
843
844 desc->cyclic = cyclic;
845 desc->direction = dir;
846
847 rcar_dmac_chan_configure_desc(chan, desc);
848
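	/*
	 * TCR counts transfer units of 1 << xfer_shift bytes each, which
	 * bounds the byte size of a single chunk.
	 */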
849 max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;
850
851 /*
852 * Allocate and fill the transfer chunk descriptors. We own the only
853 * reference to the DMA descriptor, there's no need for locking.
854 */
855 for_each_sg(sgl, sg, sg_len, i) {
856 dma_addr_t mem_addr = sg_dma_address(sg);
857 unsigned int len = sg_dma_len(sg);
858
859 full_size += len;
860
861 while (len) {
862 unsigned int size = min(len, max_chunk_size);
863
864#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
865 /*
866 * Prevent individual transfers from crossing 4GB
867 * boundaries.
868 */
869 if (dev_addr >> 32 != (dev_addr + size - 1) >> 32)
870 size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
871 if (mem_addr >> 32 != (mem_addr + size - 1) >> 32)
872 size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
873
874 /*
875 * Check if either of the source or destination address
876 * can't be expressed in 32 bits. If so we can't use
877 * hardware descriptor lists.
878 */
879 if (dev_addr >> 32 || mem_addr >> 32)
880 highmem = true;
881#endif
882
883 chunk = rcar_dmac_xfer_chunk_get(chan);
884 if (!chunk) {
885 rcar_dmac_desc_put(chan, desc);
886 return NULL;
887 }
888
889 if (dir == DMA_DEV_TO_MEM) {
890 chunk->src_addr = dev_addr;
891 chunk->dst_addr = mem_addr;
892 } else {
893 chunk->src_addr = mem_addr;
894 chunk->dst_addr = dev_addr;
895 }
896
897 chunk->size = size;
898
899 dev_dbg(chan->chan.device->dev,
900 "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
901 chan->index, chunk, desc, i, sg, size, len,
902 &chunk->src_addr, &chunk->dst_addr);
903
904 mem_addr += size;
905 if (dir == DMA_MEM_TO_MEM)
906 dev_addr += size;
907
908 len -= size;
909
910 list_add_tail(&chunk->node, &desc->chunks);
911 nchunks++;
912 }
913 }
914
915 desc->nchunks = nchunks;
916 desc->size = full_size;
917
918 /*
919 * Use hardware descriptor lists if possible when more than one chunk
920 * needs to be transferred (otherwise they don't make much sense).
921 *
922 * The highmem check currently covers the whole transfer. As an
923 * optimization we could use descriptor lists for consecutive lowmem
924 * chunks and direct manual mode for highmem chunks. Whether the
925 * performance improvement would be significant enough compared to the
926 * additional complexity remains to be investigated.
927 */
928 desc->hwdescs.use = !highmem && nchunks > 1;
929 if (desc->hwdescs.use) {
930 if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
931 desc->hwdescs.use = false;
932 }
933
934 return &desc->async_tx;
935}
936
937/* -----------------------------------------------------------------------------
938 * DMA engine operations
939 */
940
941static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
942{
943 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
944 int ret;
945
946 INIT_LIST_HEAD(&rchan->desc.chunks_free);
947 INIT_LIST_HEAD(&rchan->desc.pages);
948
949 /* Preallocate descriptors. */
950 ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
951 if (ret < 0)
952 return -ENOMEM;
953
954 ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
955 if (ret < 0)
956 return -ENOMEM;
957
958 return pm_runtime_get_sync(chan->device->dev);
959}
960
961static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
962{
963 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
964 struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
965 struct rcar_dmac_desc_page *page, *_page;
966 struct rcar_dmac_desc *desc;
967 LIST_HEAD(list);
968
969 /* Protect against ISR */
970 spin_lock_irq(&rchan->lock);
971 rcar_dmac_chan_halt(rchan);
972 spin_unlock_irq(&rchan->lock);
973
974 /* Now no new interrupts will occur */
975
976 if (rchan->mid_rid >= 0) {
977 /* The caller is holding dma_list_mutex */
978 clear_bit(rchan->mid_rid, dmac->modules);
979 rchan->mid_rid = -EINVAL;
980 }
981
982 list_splice_init(&rchan->desc.free, &list);
983 list_splice_init(&rchan->desc.pending, &list);
984 list_splice_init(&rchan->desc.active, &list);
985 list_splice_init(&rchan->desc.done, &list);
986 list_splice_init(&rchan->desc.wait, &list);
987
988 list_for_each_entry(desc, &list, node)
989 rcar_dmac_realloc_hwdesc(rchan, desc, 0);
990
991 list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
992 list_del(&page->node);
993 free_page((unsigned long)page);
994 }
995
996 pm_runtime_put(chan->device->dev);
997}
998
999static struct dma_async_tx_descriptor *
1000rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
1001 dma_addr_t dma_src, size_t len, unsigned long flags)
1002{
1003 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1004 struct scatterlist sgl;
1005
1006 if (!len)
1007 return NULL;
1008
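	/* Wrap the source in a single-entry SG list to reuse the common prep path. */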
1009 sg_init_table(&sgl, 1);
1010 sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
1011 offset_in_page(dma_src));
1012 sg_dma_address(&sgl) = dma_src;
1013 sg_dma_len(&sgl) = len;
1014
1015 return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
1016 DMA_MEM_TO_MEM, flags, false);
1017}
1018
1019static struct dma_async_tx_descriptor *
1020rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1021 unsigned int sg_len, enum dma_transfer_direction dir,
1022 unsigned long flags, void *context)
1023{
1024 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1025 dma_addr_t dev_addr;
1026
1027 /* Someone calling slave DMA on a generic channel? */
1028 if (rchan->mid_rid < 0 || !sg_len) {
1029 dev_warn(chan->device->dev,
1030 "%s: bad parameter: len=%d, id=%d\n",
1031 __func__, sg_len, rchan->mid_rid);
1032 return NULL;
1033 }
1034
1035 dev_addr = dir == DMA_DEV_TO_MEM
1036 ? rchan->src_slave_addr : rchan->dst_slave_addr;
1037 return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
1038 dir, flags, false);
1039}
1040
1041#define RCAR_DMAC_MAX_SG_LEN 32
1042
1043static struct dma_async_tx_descriptor *
1044rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
1045 size_t buf_len, size_t period_len,
1046 enum dma_transfer_direction dir, unsigned long flags)
1047{
1048 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1049 struct dma_async_tx_descriptor *desc;
1050 struct scatterlist *sgl;
1051 dma_addr_t dev_addr;
1052 unsigned int sg_len;
1053 unsigned int i;
1054
1055 /* Someone calling slave DMA on a generic channel? */
1056 if (rchan->mid_rid < 0 || buf_len < period_len) {
1057 dev_warn(chan->device->dev,
1058 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
1059 __func__, buf_len, period_len, rchan->mid_rid);
1060 return NULL;
1061 }
1062
1063 sg_len = buf_len / period_len;
1064 if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
1065 dev_err(chan->device->dev,
1066 "chan%u: sg length %d exceeds limit %d\n",
1067 rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
1068 return NULL;
1069 }
1070
1071 /*
1072 * Allocate the sg list dynamically as it would consume too much stack
1073 * space.
1074 */
1075 sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
1076 if (!sgl)
1077 return NULL;
1078
1079 sg_init_table(sgl, sg_len);
1080
1081 for (i = 0; i < sg_len; ++i) {
1082 dma_addr_t src = buf_addr + (period_len * i);
1083
1084 sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
1085 offset_in_page(src));
1086 sg_dma_address(&sgl[i]) = src;
1087 sg_dma_len(&sgl[i]) = period_len;
1088 }
1089
1090 dev_addr = dir == DMA_DEV_TO_MEM
1091 ? rchan->src_slave_addr : rchan->dst_slave_addr;
1092 desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
1093 dir, flags, true);
1094
1095 kfree(sgl);
1096 return desc;
1097}
1098
1099static int rcar_dmac_device_config(struct dma_chan *chan,
1100 struct dma_slave_config *cfg)
1101{
1102 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1103
1104 /*
1105 * We could lock this, but you shouldn't be configuring the
1106 * channel while using it...
1107 */
1108 rchan->src_slave_addr = cfg->src_addr;
1109 rchan->dst_slave_addr = cfg->dst_addr;
1110 rchan->src_xfer_size = cfg->src_addr_width;
1111 rchan->dst_xfer_size = cfg->dst_addr_width;
1112
1113 return 0;
1114}
1115
1116static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
1117{
1118 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1119 unsigned long flags;
1120
1121 spin_lock_irqsave(&rchan->lock, flags);
1122 rcar_dmac_chan_halt(rchan);
1123 spin_unlock_irqrestore(&rchan->lock, flags);
1124
1125 /*
1126 * FIXME: No new interrupt can occur now, but the IRQ thread might still
1127 * be running.
1128 */
1129
1130 rcar_dmac_chan_reinit(rchan);
1131
1132 return 0;
1133}
1134
1135static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
1136 dma_cookie_t cookie)
1137{
1138 struct rcar_dmac_desc *desc = chan->desc.running;
1139 struct rcar_dmac_xfer_chunk *running = NULL;
1140 struct rcar_dmac_xfer_chunk *chunk;
1141 unsigned int residue = 0;
1142 unsigned int dptr = 0;
1143
1144 if (!desc)
1145 return 0;
1146
1147 /*
1148 * If the cookie doesn't correspond to the currently running transfer
1149 * then the descriptor hasn't been processed yet, and the residue is
1150 * equal to the full descriptor size.
1151 */
1152 if (cookie != desc->async_tx.cookie)
1153 return desc->size;
1154
1155 /*
1156 * In descriptor mode the descriptor running pointer is not maintained
1157 * by the interrupt handler; find the running descriptor from the
1158 * descriptor pointer field in the CHCRB register. In non-descriptor
1159 * mode just use the running descriptor pointer.
1160 */
1161 if (desc->hwdescs.use) {
1162 dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
1163 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
1164 WARN_ON(dptr >= desc->nchunks);
1165 } else {
1166 running = desc->running;
1167 }
1168
1169 /* Compute the size of all chunks still to be transferred. */
1170 list_for_each_entry_reverse(chunk, &desc->chunks, node) {
1171 if (chunk == running || ++dptr == desc->nchunks)
1172 break;
1173
1174 residue += chunk->size;
1175 }
1176
1177 /* Add the residue for the current chunk. */
1178 residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift;
1179
1180 return residue;
1181}
1182
1183static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
1184 dma_cookie_t cookie,
1185 struct dma_tx_state *txstate)
1186{
1187 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1188 enum dma_status status;
1189 unsigned long flags;
1190 unsigned int residue;
1191
1192 status = dma_cookie_status(chan, cookie, txstate);
1193 if (status == DMA_COMPLETE || !txstate)
1194 return status;
1195
1196 spin_lock_irqsave(&rchan->lock, flags);
1197 residue = rcar_dmac_chan_get_residue(rchan, cookie);
1198 spin_unlock_irqrestore(&rchan->lock, flags);
1199
1200 dma_set_residue(txstate, residue);
1201
1202 return status;
1203}
1204
1205static void rcar_dmac_issue_pending(struct dma_chan *chan)
1206{
1207 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1208 unsigned long flags;
1209
1210 spin_lock_irqsave(&rchan->lock, flags);
1211
1212 if (list_empty(&rchan->desc.pending))
1213 goto done;
1214
1215 /* Append the pending list to the active list. */
1216 list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);
1217
1218 /*
1219 * If no transfer is running pick the first descriptor from the active
1220 * list and start the transfer.
1221 */
1222 if (!rchan->desc.running) {
1223 struct rcar_dmac_desc *desc;
1224
1225 desc = list_first_entry(&rchan->desc.active,
1226 struct rcar_dmac_desc, node);
1227 rchan->desc.running = desc;
1228
1229 rcar_dmac_chan_start_xfer(rchan);
1230 }
1231
1232done:
1233 spin_unlock_irqrestore(&rchan->lock, flags);
1234}
1235
1236/* -----------------------------------------------------------------------------
1237 * IRQ handling
1238 */
1239
1240static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
1241{
1242 struct rcar_dmac_desc *desc = chan->desc.running;
1243 unsigned int stage;
1244
1245 if (WARN_ON(!desc || !desc->cyclic)) {
1246 /*
1247 * This should never happen: there should always be a running
1248 * cyclic descriptor when a descriptor stage end interrupt is
1249 * triggered. Warn and return.
1250 */
1251 return IRQ_NONE;
1252 }
1253
1254 /* Program the interrupt pointer to the next stage. */
1255 stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
1256 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
1257 rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));
1258
1259 return IRQ_WAKE_THREAD;
1260}
1261
1262static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
1263{
1264 struct rcar_dmac_desc *desc = chan->desc.running;
1265 irqreturn_t ret = IRQ_WAKE_THREAD;
1266
1267 if (WARN_ON_ONCE(!desc)) {
1268 /*
1269 * This should never happen: there should always be a running
1270 * descriptor when a transfer end interrupt is triggered. Warn
1271 * and return.
1272 */
1273 return IRQ_NONE;
1274 }
1275
1276 /*
1277 * The transfer end interrupt isn't generated for each chunk when using
1278 * descriptor mode. Only update the running chunk pointer in
1279 * non-descriptor mode.
1280 */
1281 if (!desc->hwdescs.use) {
1282 /*
1283 * If we haven't completed the last transfer chunk simply move
1284 * to the next one. Only wake the IRQ thread if the transfer is
1285 * cyclic.
1286 */
1287 if (!list_is_last(&desc->running->node, &desc->chunks)) {
1288 desc->running = list_next_entry(desc->running, node);
1289 if (!desc->cyclic)
1290 ret = IRQ_HANDLED;
1291 goto done;
1292 }
1293
1294 /*
1295 * We've completed the last transfer chunk. If the transfer is
1296 * cyclic, move back to the first one.
1297 */
1298 if (desc->cyclic) {
1299 desc->running =
1300 list_first_entry(&desc->chunks,
1301 struct rcar_dmac_xfer_chunk,
1302 node);
1303 goto done;
1304 }
1305 }
1306
1307 /* The descriptor is complete, move it to the done list. */
1308 list_move_tail(&desc->node, &chan->desc.done);
1309
1310 /* Queue the next descriptor, if any. */
1311 if (!list_empty(&chan->desc.active))
1312 chan->desc.running = list_first_entry(&chan->desc.active,
1313 struct rcar_dmac_desc,
1314 node);
1315 else
1316 chan->desc.running = NULL;
1317
1318done:
1319 if (chan->desc.running)
1320 rcar_dmac_chan_start_xfer(chan);
1321
1322 return ret;
1323}
1324
1325static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
1326{
1327 u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
1328 struct rcar_dmac_chan *chan = dev;
1329 irqreturn_t ret = IRQ_NONE;
1330 u32 chcr;
1331
1332 spin_lock(&chan->lock);
1333
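	/*
	 * Acknowledge the DSE and TE flags, and clear DE as well once the
	 * transfer has ended so that the channel can be restarted.
	 */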
1334 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
1335 if (chcr & RCAR_DMACHCR_TE)
1336 mask |= RCAR_DMACHCR_DE;
1337 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
1338
1339 if (chcr & RCAR_DMACHCR_DSE)
1340 ret |= rcar_dmac_isr_desc_stage_end(chan);
1341
1342 if (chcr & RCAR_DMACHCR_TE)
1343 ret |= rcar_dmac_isr_transfer_end(chan);
1344
1345 spin_unlock(&chan->lock);
1346
1347 return ret;
1348}
1349
1350static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
1351{
1352 struct rcar_dmac_chan *chan = dev;
1353 struct rcar_dmac_desc *desc;
1354
1355 spin_lock_irq(&chan->lock);
1356
1357 /* For cyclic transfers notify the user after every chunk. */
1358 if (chan->desc.running && chan->desc.running->cyclic) {
1359 dma_async_tx_callback callback;
1360 void *callback_param;
1361
1362 desc = chan->desc.running;
1363 callback = desc->async_tx.callback;
1364 callback_param = desc->async_tx.callback_param;
1365
1366 if (callback) {
1367 spin_unlock_irq(&chan->lock);
1368 callback(callback_param);
1369 spin_lock_irq(&chan->lock);
1370 }
1371 }
1372
1373 /*
1374 * Call the callback function for all descriptors on the done list and
1375 * move them to the ack wait list.
1376 */
1377 while (!list_empty(&chan->desc.done)) {
1378 desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
1379 node);
1380 dma_cookie_complete(&desc->async_tx);
1381 list_del(&desc->node);
1382
1383 if (desc->async_tx.callback) {
1384 spin_unlock_irq(&chan->lock);
1385 /*
1386 * We own the only reference to this descriptor, we can
1387 * safely dereference it without holding the channel
1388 * lock.
1389 */
1390 desc->async_tx.callback(desc->async_tx.callback_param);
1391 spin_lock_irq(&chan->lock);
1392 }
1393
1394 list_add_tail(&desc->node, &chan->desc.wait);
1395 }
1396
1397 spin_unlock_irq(&chan->lock);
1398
1399 /* Recycle all acked descriptors. */
1400 rcar_dmac_desc_recycle_acked(chan);
1401
1402 return IRQ_HANDLED;
1403}
1404
1405static irqreturn_t rcar_dmac_isr_error(int irq, void *data)
1406{
1407 struct rcar_dmac *dmac = data;
1408
1409 if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE))
1410 return IRQ_NONE;
1411
1412 /*
1413 * An unrecoverable error occurred on an unknown channel. Halt the DMAC,
1414 * abort transfers on all channels, and reinitialize the DMAC.
1415 */
1416 rcar_dmac_stop(dmac);
1417 rcar_dmac_abort(dmac);
1418 rcar_dmac_init(dmac);
1419
1420 return IRQ_HANDLED;
1421}
1422
1423/* -----------------------------------------------------------------------------
1424 * OF xlate and channel filter
1425 */
1426
1427static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
1428{
1429 struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
1430 struct of_phandle_args *dma_spec = arg;
1431
1432 /*
1433 * FIXME: Using a filter on OF platforms is nonsense. The OF xlate
1434 * function knows which device it wants to allocate a channel from,
1435 * and would be perfectly capable of selecting the channel it wants.
1436 * Forcing it to call dma_request_channel() and iterate through all
1437 * channels from all controllers is just pointless.
1438 */
1439 if (chan->device->device_config != rcar_dmac_device_config ||
1440 dma_spec->np != chan->device->dev->of_node)
1441 return false;
1442
1443 return !test_and_set_bit(dma_spec->args[0], dmac->modules);
1444}
1445
1446static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
1447 struct of_dma *ofdma)
1448{
1449 struct rcar_dmac_chan *rchan;
1450 struct dma_chan *chan;
1451 dma_cap_mask_t mask;
1452
1453 if (dma_spec->args_count != 1)
1454 return NULL;
1455
1456 /* Only slave DMA channels can be allocated via DT */
1457 dma_cap_zero(mask);
1458 dma_cap_set(DMA_SLAVE, mask);
1459
1460 chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
1461 if (!chan)
1462 return NULL;
1463
1464 rchan = to_rcar_dmac_chan(chan);
1465 rchan->mid_rid = dma_spec->args[0];
1466
1467 return chan;
1468}
1469
1470/* -----------------------------------------------------------------------------
1471 * Power management
1472 */
1473
1474#ifdef CONFIG_PM_SLEEP
1475static int rcar_dmac_sleep_suspend(struct device *dev)
1476{
1477 /*
1478 * TODO: Wait for the current transfer to complete and stop the device.
1479 */
1480 return 0;
1481}
1482
1483static int rcar_dmac_sleep_resume(struct device *dev)
1484{
1485 /* TODO: Resume transfers, if any. */
1486 return 0;
1487}
1488#endif
1489
1490#ifdef CONFIG_PM
1491static int rcar_dmac_runtime_suspend(struct device *dev)
1492{
1493 return 0;
1494}
1495
1496static int rcar_dmac_runtime_resume(struct device *dev)
1497{
1498 struct rcar_dmac *dmac = dev_get_drvdata(dev);
1499
1500 return rcar_dmac_init(dmac);
1501}
1502#endif
1503
1504static const struct dev_pm_ops rcar_dmac_pm = {
1505 SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume)
1506 SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
1507 NULL)
1508};
1509
1510/* -----------------------------------------------------------------------------
1511 * Probe and remove
1512 */
1513
1514static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
1515 struct rcar_dmac_chan *rchan,
1516 unsigned int index)
1517{
1518 struct platform_device *pdev = to_platform_device(dmac->dev);
1519 struct dma_chan *chan = &rchan->chan;
1520 char pdev_irqname[5];
1521 char *irqname;
1522 int irq;
1523 int ret;
1524
1525 rchan->index = index;
1526 rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
1527 rchan->mid_rid = -EINVAL;
1528
1529 spin_lock_init(&rchan->lock);
1530
1531 INIT_LIST_HEAD(&rchan->desc.free);
1532 INIT_LIST_HEAD(&rchan->desc.pending);
1533 INIT_LIST_HEAD(&rchan->desc.active);
1534 INIT_LIST_HEAD(&rchan->desc.done);
1535 INIT_LIST_HEAD(&rchan->desc.wait);
1536
1537 /* Request the channel interrupt. */
1538 sprintf(pdev_irqname, "ch%u", index);
1539 irq = platform_get_irq_byname(pdev, pdev_irqname);
1540 if (irq < 0) {
1541 dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
1542 return -ENODEV;
1543 }
1544
1545 irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
1546 dev_name(dmac->dev), index);
1547 if (!irqname)
1548 return -ENOMEM;
1549
1550 ret = devm_request_threaded_irq(dmac->dev, irq, rcar_dmac_isr_channel,
1551 rcar_dmac_isr_channel_thread, 0,
1552 irqname, rchan);
1553 if (ret) {
1554 dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", irq, ret);
1555 return ret;
1556 }
1557
1558 /*
1559 * Initialize the DMA engine channel and add it to the DMA engine
1560 * channels list.
1561 */
1562 chan->device = &dmac->engine;
1563 dma_cookie_init(chan);
1564
1565 list_add_tail(&chan->device_node, &dmac->engine.channels);
1566
1567 return 0;
1568}
1569
1570static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
1571{
1572 struct device_node *np = dev->of_node;
1573 int ret;
1574
1575 ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
1576 if (ret < 0) {
1577 dev_err(dev, "unable to read dma-channels property\n");
1578 return ret;
1579 }
1580
1581 if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
1582 dev_err(dev, "invalid number of channels %u\n",
1583 dmac->n_channels);
1584 return -EINVAL;
1585 }
1586
1587 return 0;
1588}
1589
1590static int rcar_dmac_probe(struct platform_device *pdev)
1591{
1592 const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
1593 DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
1594 DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
1595 DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
1596 unsigned int channels_offset = 0;
1597 struct dma_device *engine;
1598 struct rcar_dmac *dmac;
1599 struct resource *mem;
1600 unsigned int i;
1601 char *irqname;
1602 int irq;
1603 int ret;
1604
1605 dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
1606 if (!dmac)
1607 return -ENOMEM;
1608
1609 dmac->dev = &pdev->dev;
1610 platform_set_drvdata(pdev, dmac);
1611
1612 ret = rcar_dmac_parse_of(&pdev->dev, dmac);
1613 if (ret < 0)
1614 return ret;
1615
1616 /*
1617 * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 from
1618 * being flushed correctly, resulting in memory corruption. DMAC 0 channel 0
1619 * is connected to microTLB 0 on currently supported platforms, so we
1620 * can't use it with the IPMMU. As the IOMMU API operates at the device
1621 * level we can't disable it selectively, so ignore channel 0 for now if
1622 * the device is part of an IOMMU group.
1623 */
1624 if (pdev->dev.iommu_group) {
1625 dmac->n_channels--;
1626 channels_offset = 1;
1627 }
1628
1629 dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
1630 sizeof(*dmac->channels), GFP_KERNEL);
1631 if (!dmac->channels)
1632 return -ENOMEM;
1633
1634 /* Request resources. */
1635 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1636 dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
1637 if (IS_ERR(dmac->iomem))
1638 return PTR_ERR(dmac->iomem);
1639
1640 irq = platform_get_irq_byname(pdev, "error");
1641 if (irq < 0) {
1642 dev_err(&pdev->dev, "no error IRQ specified\n");
1643 return -ENODEV;
1644 }
1645
1646 irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error",
1647 dev_name(dmac->dev));
1648 if (!irqname)
1649 return -ENOMEM;
1650
1651 ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
1652 irqname, dmac);
1653 if (ret) {
1654 dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
1655 irq, ret);
1656 return ret;
1657 }
1658
1659 /* Enable runtime PM and initialize the device. */
1660 pm_runtime_enable(&pdev->dev);
1661 ret = pm_runtime_get_sync(&pdev->dev);
1662 if (ret < 0) {
1663 dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
1664 return ret;
1665 }
1666
1667 ret = rcar_dmac_init(dmac);
1668 pm_runtime_put(&pdev->dev);
1669
1670 if (ret) {
1671 dev_err(&pdev->dev, "failed to reset device\n");
1672 goto error;
1673 }
1674
1675 /* Initialize the channels. */
1676 INIT_LIST_HEAD(&dmac->engine.channels);
1677
1678 for (i = 0; i < dmac->n_channels; ++i) {
1679 ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
1680 i + channels_offset);
1681 if (ret < 0)
1682 goto error;
1683 }
1684
1685 /* Register the DMAC as a DMA provider for DT. */
1686 ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
1687 NULL);
1688 if (ret < 0)
1689 goto error;
1690
1691 /*
1692 * Register the DMA engine device.
1693 *
1694 * The hardcoded 4-byte MEMCPY transfer size requires 4-byte alignment.
1695 */
1696 engine = &dmac->engine;
1697 dma_cap_set(DMA_MEMCPY, engine->cap_mask);
1698 dma_cap_set(DMA_SLAVE, engine->cap_mask);
1699
1700 engine->dev = &pdev->dev;
1701 engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);
1702
1703 engine->src_addr_widths = widths;
1704 engine->dst_addr_widths = widths;
1705 engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
1706 engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1707
1708 engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
1709 engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
1710 engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
1711 engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
1712 engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
1713 engine->device_config = rcar_dmac_device_config;
1714 engine->device_terminate_all = rcar_dmac_chan_terminate_all;
1715 engine->device_tx_status = rcar_dmac_tx_status;
1716 engine->device_issue_pending = rcar_dmac_issue_pending;
1717
1718 ret = dma_async_device_register(engine);
1719 if (ret < 0)
1720 goto error;
1721
1722 return 0;
1723
1724error:
1725 of_dma_controller_free(pdev->dev.of_node);
1726 pm_runtime_disable(&pdev->dev);
1727 return ret;
1728}
1729
1730static int rcar_dmac_remove(struct platform_device *pdev)
1731{
1732 struct rcar_dmac *dmac = platform_get_drvdata(pdev);
1733
1734 of_dma_controller_free(pdev->dev.of_node);
1735 dma_async_device_unregister(&dmac->engine);
1736
1737 pm_runtime_disable(&pdev->dev);
1738
1739 return 0;
1740}
1741
1742static void rcar_dmac_shutdown(struct platform_device *pdev)
1743{
1744 struct rcar_dmac *dmac = platform_get_drvdata(pdev);
1745
1746 rcar_dmac_stop(dmac);
1747}
1748
1749static const struct of_device_id rcar_dmac_of_ids[] = {
1750 { .compatible = "renesas,rcar-dmac", },
1751 { /* Sentinel */ }
1752};
1753MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);
1754
1755static struct platform_driver rcar_dmac_driver = {
1756 .driver = {
1757 .pm = &rcar_dmac_pm,
1758 .name = "rcar-dmac",
1759 .of_match_table = rcar_dmac_of_ids,
1760 },
1761 .probe = rcar_dmac_probe,
1762 .remove = rcar_dmac_remove,
1763 .shutdown = rcar_dmac_shutdown,
1764};
1765
1766module_platform_driver(rcar_dmac_driver);
1767
1768MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
1769MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
1770MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
index 20a6f6f2a018..749f26ecd3b3 100644
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -534,6 +534,8 @@ static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
 
 static int hpb_dmae_probe(struct platform_device *pdev)
 {
+	const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
+		DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES;
 	struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
 	struct hpb_dmae_device *hpbdev;
 	struct dma_device *dma_dev;
@@ -595,6 +597,10 @@ static int hpb_dmae_probe(struct platform_device *pdev)
 
 	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
 	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+	dma_dev->src_addr_widths = widths;
+	dma_dev->dst_addr_widths = widths;
+	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
 
 	hpbdev->shdma_dev.ops = &hpb_dmae_ops;
 	hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 3a2adb131d46..8ee383d339a5 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -729,57 +729,50 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
 	return desc;
 }
 
-static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			 unsigned long arg)
+static int shdma_terminate_all(struct dma_chan *chan)
 {
 	struct shdma_chan *schan = to_shdma_chan(chan);
 	struct shdma_dev *sdev = to_shdma_dev(chan->device);
 	const struct shdma_ops *ops = sdev->ops;
-	struct dma_slave_config *config;
 	unsigned long flags;
-	int ret;
 
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		spin_lock_irqsave(&schan->chan_lock, flags);
-		ops->halt_channel(schan);
+	spin_lock_irqsave(&schan->chan_lock, flags);
+	ops->halt_channel(schan);
 
-		if (ops->get_partial && !list_empty(&schan->ld_queue)) {
-			/* Record partial transfer */
-			struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
-								   struct shdma_desc, node);
-			desc->partial = ops->get_partial(schan, desc);
-		}
+	if (ops->get_partial && !list_empty(&schan->ld_queue)) {
+		/* Record partial transfer */
+		struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
+							   struct shdma_desc, node);
+		desc->partial = ops->get_partial(schan, desc);
+	}
 
-		spin_unlock_irqrestore(&schan->chan_lock, flags);
+	spin_unlock_irqrestore(&schan->chan_lock, flags);
 
-		shdma_chan_ld_cleanup(schan, true);
-		break;
-	case DMA_SLAVE_CONFIG:
-		/*
-		 * So far only .slave_id is used, but the slave drivers are
-		 * encouraged to also set a transfer direction and an address.
-		 */
-		if (!arg)
-			return -EINVAL;
-		/*
-		 * We could lock this, but you shouldn't be configuring the
-		 * channel, while using it...
-		 */
-		config = (struct dma_slave_config *)arg;
-		ret = shdma_setup_slave(schan, config->slave_id,
-					config->direction == DMA_DEV_TO_MEM ?
-					config->src_addr : config->dst_addr);
-		if (ret < 0)
-			return ret;
-		break;
-	default:
-		return -ENXIO;
-	}
+	shdma_chan_ld_cleanup(schan, true);
 
 	return 0;
 }
 
+static int shdma_config(struct dma_chan *chan,
+			struct dma_slave_config *config)
+{
+	struct shdma_chan *schan = to_shdma_chan(chan);
+
+	/*
+	 * So far only .slave_id is used, but the slave drivers are
+	 * encouraged to also set a transfer direction and an address.
+	 */
+	if (!config)
+		return -EINVAL;
+	/*
+	 * We could lock this, but you shouldn't be configuring the
+	 * channel, while using it...
+	 */
+	return shdma_setup_slave(schan, config->slave_id,
+				 config->direction == DMA_DEV_TO_MEM ?
+				 config->src_addr : config->dst_addr);
+}
+
 static void shdma_issue_pending(struct dma_chan *chan)
 {
 	struct shdma_chan *schan = to_shdma_chan(chan);
@@ -1002,7 +995,8 @@ int shdma_init(struct device *dev, struct shdma_dev *sdev,
 	/* Compulsory for DMA_SLAVE fields */
 	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
 	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
-	dma_dev->device_control = shdma_control;
+	dma_dev->device_config = shdma_config;
+	dma_dev->device_terminate_all = shdma_terminate_all;
 
 	dma_dev->dev = dev;
 
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index aec8a84784a4..b2431aa30033 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -588,6 +588,7 @@ static void sh_dmae_shutdown(struct platform_device *pdev)
 	sh_dmae_ctl_stop(shdev);
 }
 
+#ifdef CONFIG_PM
 static int sh_dmae_runtime_suspend(struct device *dev)
 {
 	return 0;
@@ -599,8 +600,9 @@ static int sh_dmae_runtime_resume(struct device *dev)
 
 	return sh_dmae_rst(shdev);
 }
+#endif
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int sh_dmae_suspend(struct device *dev)
 {
 	return 0;
@@ -632,16 +634,12 @@ static int sh_dmae_resume(struct device *dev)
 
 	return 0;
 }
-#else
-#define sh_dmae_suspend NULL
-#define sh_dmae_resume NULL
 #endif
 
 static const struct dev_pm_ops sh_dmae_pm = {
-	.suspend		= sh_dmae_suspend,
-	.resume			= sh_dmae_resume,
-	.runtime_suspend	= sh_dmae_runtime_suspend,
-	.runtime_resume		= sh_dmae_runtime_resume,
+	SET_SYSTEM_SLEEP_PM_OPS(sh_dmae_suspend, sh_dmae_resume)
+	SET_RUNTIME_PM_OPS(sh_dmae_runtime_suspend, sh_dmae_runtime_resume,
+			   NULL)
 };
 
 static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
@@ -684,6 +682,10 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
 
 static int sh_dmae_probe(struct platform_device *pdev)
 {
+	const enum dma_slave_buswidth widths =
+		DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
+		DMA_SLAVE_BUSWIDTH_4_BYTES  | DMA_SLAVE_BUSWIDTH_8_BYTES |
+		DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES;
 	const struct sh_dmae_pdata *pdata;
 	unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
 	int chan_irq[SH_DMAE_MAX_CHANNELS];
@@ -746,6 +748,11 @@ static int sh_dmae_probe(struct platform_device *pdev)
 		return PTR_ERR(shdev->dmars);
 	}
 
+	dma_dev->src_addr_widths = widths;
+	dma_dev->dst_addr_widths = widths;
+	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
 	if (!pdata->slave_only)
 		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
 	if (pdata->slave && pdata->slave_num)
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 3492a5f91d31..d0086e9f2082 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -281,9 +281,10 @@ static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
281 return cookie; 281 return cookie;
282} 282}
283 283
284static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan, 284static int sirfsoc_dma_slave_config(struct dma_chan *chan,
285 struct dma_slave_config *config) 285 struct dma_slave_config *config)
286{ 286{
287 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
287 unsigned long flags; 288 unsigned long flags;
288 289
289 if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) || 290 if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
@@ -297,8 +298,9 @@ static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
297 return 0; 298 return 0;
298} 299}
299 300
300static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan) 301static int sirfsoc_dma_terminate_all(struct dma_chan *chan)
301{ 302{
303 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
302 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); 304 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
303 int cid = schan->chan.chan_id; 305 int cid = schan->chan.chan_id;
304 unsigned long flags; 306 unsigned long flags;
@@ -327,8 +329,9 @@ static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
327 return 0; 329 return 0;
328} 330}
329 331
330static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan) 332static int sirfsoc_dma_pause_chan(struct dma_chan *chan)
331{ 333{
334 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
332 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); 335 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
333 int cid = schan->chan.chan_id; 336 int cid = schan->chan.chan_id;
334 unsigned long flags; 337 unsigned long flags;
@@ -348,8 +351,9 @@ static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
348 return 0; 351 return 0;
349} 352}
350 353
351static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan) 354static int sirfsoc_dma_resume_chan(struct dma_chan *chan)
352{ 355{
356 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
353 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); 357 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
354 int cid = schan->chan.chan_id; 358 int cid = schan->chan.chan_id;
355 unsigned long flags; 359 unsigned long flags;
@@ -369,30 +373,6 @@ static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
369 return 0; 373 return 0;
370} 374}
371 375
372static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
373 unsigned long arg)
374{
375 struct dma_slave_config *config;
376 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
377
378 switch (cmd) {
379 case DMA_PAUSE:
380 return sirfsoc_dma_pause_chan(schan);
381 case DMA_RESUME:
382 return sirfsoc_dma_resume_chan(schan);
383 case DMA_TERMINATE_ALL:
384 return sirfsoc_dma_terminate_all(schan);
385 case DMA_SLAVE_CONFIG:
386 config = (struct dma_slave_config *)arg;
387 return sirfsoc_dma_slave_config(schan, config);
388
389 default:
390 break;
391 }
392
393 return -ENOSYS;
394}
395
396/* Alloc channel resources */ 376/* Alloc channel resources */
397static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan) 377static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
398{ 378{
@@ -648,18 +628,6 @@ EXPORT_SYMBOL(sirfsoc_dma_filter_id);
648 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ 628 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
649 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) 629 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
650 630
651static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
652 struct dma_slave_caps *caps)
653{
654 caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
655 caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
656 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
657 caps->cmd_pause = true;
658 caps->cmd_terminate = true;
659
660 return 0;
661}
662
663static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec, 631static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
664 struct of_dma *ofdma) 632 struct of_dma *ofdma)
665{ 633{
@@ -739,11 +707,16 @@ static int sirfsoc_dma_probe(struct platform_device *op)
739 dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources; 707 dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
740 dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources; 708 dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
741 dma->device_issue_pending = sirfsoc_dma_issue_pending; 709 dma->device_issue_pending = sirfsoc_dma_issue_pending;
742 dma->device_control = sirfsoc_dma_control; 710 dma->device_config = sirfsoc_dma_slave_config;
711 dma->device_pause = sirfsoc_dma_pause_chan;
712 dma->device_resume = sirfsoc_dma_resume_chan;
713 dma->device_terminate_all = sirfsoc_dma_terminate_all;
743 dma->device_tx_status = sirfsoc_dma_tx_status; 714 dma->device_tx_status = sirfsoc_dma_tx_status;
744 dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved; 715 dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
745 dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic; 716 dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
746 dma->device_slave_caps = sirfsoc_dma_device_slave_caps; 717 dma->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
718 dma->dst_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
719 dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
747 720
748 INIT_LIST_HEAD(&dma->channels); 721 INIT_LIST_HEAD(&dma->channels);
749 dma_cap_set(DMA_SLAVE, dma->cap_mask); 722 dma_cap_set(DMA_SLAVE, dma->cap_mask);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 15d49461c0d2..68aca3334a17 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1429,11 +1429,17 @@ static bool d40_tx_is_linked(struct d40_chan *d40c)
1429 return is_link; 1429 return is_link;
1430} 1430}
1431 1431
1432static int d40_pause(struct d40_chan *d40c) 1432static int d40_pause(struct dma_chan *chan)
1433{ 1433{
1434 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1434 int res = 0; 1435 int res = 0;
1435 unsigned long flags; 1436 unsigned long flags;
1436 1437
1438 if (d40c->phy_chan == NULL) {
1439 chan_err(d40c, "Channel is not allocated!\n");
1440 return -EINVAL;
1441 }
1442
1437 if (!d40c->busy) 1443 if (!d40c->busy)
1438 return 0; 1444 return 0;
1439 1445
@@ -1448,11 +1454,17 @@ static int d40_pause(struct d40_chan *d40c)
1448 return res; 1454 return res;
1449} 1455}
1450 1456
1451static int d40_resume(struct d40_chan *d40c) 1457static int d40_resume(struct dma_chan *chan)
1452{ 1458{
1459 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1453 int res = 0; 1460 int res = 0;
1454 unsigned long flags; 1461 unsigned long flags;
1455 1462
1463 if (d40c->phy_chan == NULL) {
1464 chan_err(d40c, "Channel is not allocated!\n");
1465 return -EINVAL;
1466 }
1467
1456 if (!d40c->busy) 1468 if (!d40c->busy)
1457 return 0; 1469 return 0;
1458 1470
@@ -2604,12 +2616,17 @@ static void d40_issue_pending(struct dma_chan *chan)
2604 spin_unlock_irqrestore(&d40c->lock, flags); 2616 spin_unlock_irqrestore(&d40c->lock, flags);
2605} 2617}
2606 2618
2607static void d40_terminate_all(struct dma_chan *chan) 2619static int d40_terminate_all(struct dma_chan *chan)
2608{ 2620{
2609 unsigned long flags; 2621 unsigned long flags;
2610 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2622 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2611 int ret; 2623 int ret;
2612 2624
2625 if (d40c->phy_chan == NULL) {
2626 chan_err(d40c, "Channel is not allocated!\n");
2627 return -EINVAL;
2628 }
2629
2613 spin_lock_irqsave(&d40c->lock, flags); 2630 spin_lock_irqsave(&d40c->lock, flags);
2614 2631
2615 pm_runtime_get_sync(d40c->base->dev); 2632 pm_runtime_get_sync(d40c->base->dev);
@@ -2627,6 +2644,7 @@ static void d40_terminate_all(struct dma_chan *chan)
2627 d40c->busy = false; 2644 d40c->busy = false;
2628 2645
2629 spin_unlock_irqrestore(&d40c->lock, flags); 2646 spin_unlock_irqrestore(&d40c->lock, flags);
2647 return 0;
2630} 2648}
2631 2649
2632static int 2650static int
@@ -2673,6 +2691,11 @@ static int d40_set_runtime_config(struct dma_chan *chan,
2673 u32 src_maxburst, dst_maxburst; 2691 u32 src_maxburst, dst_maxburst;
2674 int ret; 2692 int ret;
2675 2693
2694 if (d40c->phy_chan == NULL) {
2695 chan_err(d40c, "Channel is not allocated!\n");
2696 return -EINVAL;
2697 }
2698
2676 src_addr_width = config->src_addr_width; 2699 src_addr_width = config->src_addr_width;
2677 src_maxburst = config->src_maxburst; 2700 src_maxburst = config->src_maxburst;
2678 dst_addr_width = config->dst_addr_width; 2701 dst_addr_width = config->dst_addr_width;
@@ -2781,35 +2804,6 @@ static int d40_set_runtime_config(struct dma_chan *chan,
2781 return 0; 2804 return 0;
2782} 2805}
2783 2806
2784static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2785 unsigned long arg)
2786{
2787 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2788
2789 if (d40c->phy_chan == NULL) {
2790 chan_err(d40c, "Channel is not allocated!\n");
2791 return -EINVAL;
2792 }
2793
2794 switch (cmd) {
2795 case DMA_TERMINATE_ALL:
2796 d40_terminate_all(chan);
2797 return 0;
2798 case DMA_PAUSE:
2799 return d40_pause(d40c);
2800 case DMA_RESUME:
2801 return d40_resume(d40c);
2802 case DMA_SLAVE_CONFIG:
2803 return d40_set_runtime_config(chan,
2804 (struct dma_slave_config *) arg);
2805 default:
2806 break;
2807 }
2808
2809 /* Other commands are unimplemented */
2810 return -ENXIO;
2811}
2812
2813/* Initialization functions */ 2807/* Initialization functions */
2814 2808
2815static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, 2809static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
@@ -2870,7 +2864,10 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2870 dev->device_free_chan_resources = d40_free_chan_resources; 2864 dev->device_free_chan_resources = d40_free_chan_resources;
2871 dev->device_issue_pending = d40_issue_pending; 2865 dev->device_issue_pending = d40_issue_pending;
2872 dev->device_tx_status = d40_tx_status; 2866 dev->device_tx_status = d40_tx_status;
2873 dev->device_control = d40_control; 2867 dev->device_config = d40_set_runtime_config;
2868 dev->device_pause = d40_pause;
2869 dev->device_resume = d40_resume;
2870 dev->device_terminate_all = d40_terminate_all;
2874 dev->dev = base->dev; 2871 dev->dev = base->dev;
2875} 2872}
2876 2873
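
Two details of the ste_dma40 conversion are worth noting: each new hook now receives the generic struct dma_chan and recovers the driver channel with container_of(), and the phy_chan NULL guard that lived once in the removed d40_control() is duplicated into every hook so no entry point loses the check. A compilable sketch of the container_of step (d40_chan_model is a made-up stand-in for struct d40_chan):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_chan { int chan_id; };

struct d40_chan_model {                 /* invented stand-in for struct d40_chan */
        int busy;
        struct dma_chan chan;           /* generic channel embedded in the driver's */
};

static int model_pause(struct dma_chan *chan)
{
        struct d40_chan_model *d40c =
                container_of(chan, struct d40_chan_model, chan);

        if (!d40c->busy)                /* same early-out the driver keeps */
                return 0;
        /* ... stop the channel ... */
        return 0;
}

int main(void)
{
        struct d40_chan_model c = { .busy = 0, .chan = { .chan_id = 3 } };

        printf("pause(chan %d) -> %d\n", c.chan.chan_id, model_pause(&c.chan));
        return 0;
}
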
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 159f1736a16f..7ebcf9bec698 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -355,38 +355,6 @@ static void sun6i_dma_free_desc(struct virt_dma_desc *vd)
355 kfree(txd); 355 kfree(txd);
356} 356}
357 357
358static int sun6i_dma_terminate_all(struct sun6i_vchan *vchan)
359{
360 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
361 struct sun6i_pchan *pchan = vchan->phy;
362 unsigned long flags;
363 LIST_HEAD(head);
364
365 spin_lock(&sdev->lock);
366 list_del_init(&vchan->node);
367 spin_unlock(&sdev->lock);
368
369 spin_lock_irqsave(&vchan->vc.lock, flags);
370
371 vchan_get_all_descriptors(&vchan->vc, &head);
372
373 if (pchan) {
374 writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE);
375 writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE);
376
377 vchan->phy = NULL;
378 pchan->vchan = NULL;
379 pchan->desc = NULL;
380 pchan->done = NULL;
381 }
382
383 spin_unlock_irqrestore(&vchan->vc.lock, flags);
384
385 vchan_dma_desc_free_list(&vchan->vc, &head);
386
387 return 0;
388}
389
390static int sun6i_dma_start_desc(struct sun6i_vchan *vchan) 358static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
391{ 359{
392 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device); 360 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
@@ -675,57 +643,92 @@ err_lli_free:
675 return NULL; 643 return NULL;
676} 644}
677 645
678static int sun6i_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 646static int sun6i_dma_config(struct dma_chan *chan,
679 unsigned long arg) 647 struct dma_slave_config *config)
648{
649 struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
650
651 memcpy(&vchan->cfg, config, sizeof(*config));
652
653 return 0;
654}
655
656static int sun6i_dma_pause(struct dma_chan *chan)
657{
658 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
659 struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
660 struct sun6i_pchan *pchan = vchan->phy;
661
662 dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc);
663
664 if (pchan) {
665 writel(DMA_CHAN_PAUSE_PAUSE,
666 pchan->base + DMA_CHAN_PAUSE);
667 } else {
668 spin_lock(&sdev->lock);
669 list_del_init(&vchan->node);
670 spin_unlock(&sdev->lock);
671 }
672
673 return 0;
674}
675
676static int sun6i_dma_resume(struct dma_chan *chan)
680{ 677{
681 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); 678 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
682 struct sun6i_vchan *vchan = to_sun6i_vchan(chan); 679 struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
683 struct sun6i_pchan *pchan = vchan->phy; 680 struct sun6i_pchan *pchan = vchan->phy;
684 unsigned long flags; 681 unsigned long flags;
685 int ret = 0;
686 682
687 switch (cmd) { 683 dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);
688 case DMA_RESUME:
689 dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);
690 684
691 spin_lock_irqsave(&vchan->vc.lock, flags); 685 spin_lock_irqsave(&vchan->vc.lock, flags);
692 686
693 if (pchan) { 687 if (pchan) {
694 writel(DMA_CHAN_PAUSE_RESUME, 688 writel(DMA_CHAN_PAUSE_RESUME,
695 pchan->base + DMA_CHAN_PAUSE); 689 pchan->base + DMA_CHAN_PAUSE);
696 } else if (!list_empty(&vchan->vc.desc_issued)) { 690 } else if (!list_empty(&vchan->vc.desc_issued)) {
697 spin_lock(&sdev->lock); 691 spin_lock(&sdev->lock);
698 list_add_tail(&vchan->node, &sdev->pending); 692 list_add_tail(&vchan->node, &sdev->pending);
699 spin_unlock(&sdev->lock); 693 spin_unlock(&sdev->lock);
700 } 694 }
701 695
702 spin_unlock_irqrestore(&vchan->vc.lock, flags); 696 spin_unlock_irqrestore(&vchan->vc.lock, flags);
703 break;
704 697
705 case DMA_PAUSE: 698 return 0;
706 dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc); 699}
707 700
708 if (pchan) { 701static int sun6i_dma_terminate_all(struct dma_chan *chan)
709 writel(DMA_CHAN_PAUSE_PAUSE, 702{
710 pchan->base + DMA_CHAN_PAUSE); 703 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
711 } else { 704 struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
712 spin_lock(&sdev->lock); 705 struct sun6i_pchan *pchan = vchan->phy;
713 list_del_init(&vchan->node); 706 unsigned long flags;
714 spin_unlock(&sdev->lock); 707 LIST_HEAD(head);
715 } 708
716 break; 709 spin_lock(&sdev->lock);
717 710 list_del_init(&vchan->node);
718 case DMA_TERMINATE_ALL: 711 spin_unlock(&sdev->lock);
719 ret = sun6i_dma_terminate_all(vchan); 712
720 break; 713 spin_lock_irqsave(&vchan->vc.lock, flags);
721 case DMA_SLAVE_CONFIG: 714
722 memcpy(&vchan->cfg, (void *)arg, sizeof(struct dma_slave_config)); 715 vchan_get_all_descriptors(&vchan->vc, &head);
723 break; 716
724 default: 717 if (pchan) {
725 ret = -ENXIO; 718 writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE);
726 break; 719 writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE);
720
721 vchan->phy = NULL;
722 pchan->vchan = NULL;
723 pchan->desc = NULL;
724 pchan->done = NULL;
727 } 725 }
728 return ret; 726
727 spin_unlock_irqrestore(&vchan->vc.lock, flags);
728
729 vchan_dma_desc_free_list(&vchan->vc, &head);
730
731 return 0;
729} 732}
730 733
731static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan, 734static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan,
@@ -960,9 +963,20 @@ static int sun6i_dma_probe(struct platform_device *pdev)
960 sdc->slave.device_issue_pending = sun6i_dma_issue_pending; 963 sdc->slave.device_issue_pending = sun6i_dma_issue_pending;
961 sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg; 964 sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg;
962 sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy; 965 sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy;
963 sdc->slave.device_control = sun6i_dma_control;
964 sdc->slave.copy_align = 4; 966 sdc->slave.copy_align = 4;
965 967 sdc->slave.device_config = sun6i_dma_config;
968 sdc->slave.device_pause = sun6i_dma_pause;
969 sdc->slave.device_resume = sun6i_dma_resume;
970 sdc->slave.device_terminate_all = sun6i_dma_terminate_all;
971 sdc->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
972 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
973 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
974 sdc->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
975 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
976 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
977 sdc->slave.directions = BIT(DMA_DEV_TO_MEM) |
978 BIT(DMA_MEM_TO_DEV);
979 sdc->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
966 sdc->slave.dev = &pdev->dev; 980 sdc->slave.dev = &pdev->dev;
967 981
968 sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels, 982 sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels,
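
Like sirf-dma, sun6i now advertises its slave capabilities as data rather than via a callback: bus-width and direction masks are built with BIT() over the core's enums. A tiny sketch of the mask arithmetic (the enum values are assumed to mirror the kernel's dma_slave_buswidth and dma_transfer_direction of this era; treat them as illustrative):

#include <stdio.h>

#define BIT(n) (1UL << (n))

/* assumed values, mirroring enum dma_slave_buswidth / dma_transfer_direction */
enum { BUSWIDTH_1_BYTE = 1, BUSWIDTH_2_BYTES = 2, BUSWIDTH_4_BYTES = 4 };
enum { MEM_TO_DEV = 1, DEV_TO_MEM = 2 };

int main(void)
{
        /* what the driver now advertises as fields instead of a callback */
        unsigned long widths = BIT(BUSWIDTH_1_BYTE) | BIT(BUSWIDTH_2_BYTES) |
                               BIT(BUSWIDTH_4_BYTES);
        unsigned long dirs   = BIT(DEV_TO_MEM) | BIT(MEM_TO_DEV);

        printf("width mask 0x%lx, direction mask 0x%lx\n", widths, dirs); /* 0x16, 0x6 */
        return 0;
}
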
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index d8450c3f35f0..eaf585e8286b 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -723,7 +723,7 @@ end:
723 return; 723 return;
724} 724}
725 725
726static void tegra_dma_terminate_all(struct dma_chan *dc) 726static int tegra_dma_terminate_all(struct dma_chan *dc)
727{ 727{
728 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 728 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
729 struct tegra_dma_sg_req *sgreq; 729 struct tegra_dma_sg_req *sgreq;
@@ -736,7 +736,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
736 spin_lock_irqsave(&tdc->lock, flags); 736 spin_lock_irqsave(&tdc->lock, flags);
737 if (list_empty(&tdc->pending_sg_req)) { 737 if (list_empty(&tdc->pending_sg_req)) {
738 spin_unlock_irqrestore(&tdc->lock, flags); 738 spin_unlock_irqrestore(&tdc->lock, flags);
739 return; 739 return 0;
740 } 740 }
741 741
742 if (!tdc->busy) 742 if (!tdc->busy)
@@ -777,6 +777,7 @@ skip_dma_stop:
777 dma_desc->cb_count = 0; 777 dma_desc->cb_count = 0;
778 } 778 }
779 spin_unlock_irqrestore(&tdc->lock, flags); 779 spin_unlock_irqrestore(&tdc->lock, flags);
780 return 0;
780} 781}
781 782
782static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, 783static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
@@ -827,25 +828,6 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
827 return ret; 828 return ret;
828} 829}
829 830
830static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd,
831 unsigned long arg)
832{
833 switch (cmd) {
834 case DMA_SLAVE_CONFIG:
835 return tegra_dma_slave_config(dc,
836 (struct dma_slave_config *)arg);
837
838 case DMA_TERMINATE_ALL:
839 tegra_dma_terminate_all(dc);
840 return 0;
841
842 default:
843 break;
844 }
845
846 return -ENXIO;
847}
848
849static inline int get_bus_width(struct tegra_dma_channel *tdc, 831static inline int get_bus_width(struct tegra_dma_channel *tdc,
850 enum dma_slave_buswidth slave_bw) 832 enum dma_slave_buswidth slave_bw)
851{ 833{
@@ -1443,7 +1425,23 @@ static int tegra_dma_probe(struct platform_device *pdev)
1443 tegra_dma_free_chan_resources; 1425 tegra_dma_free_chan_resources;
1444 tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg; 1426 tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
1445 tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic; 1427 tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
1446 tdma->dma_dev.device_control = tegra_dma_device_control; 1428 tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
1429 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
1430 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
1431 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
1432 tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
1433 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
1434 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
1435 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
1436 tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1437 /*
1438 * XXX The hardware appears to support
1439 * DMA_RESIDUE_GRANULARITY_BURST-level reporting, but it's
1440 * only used by this driver during tegra_dma_terminate_all()
1441 */
1442 tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
1443 tdma->dma_dev.device_config = tegra_dma_slave_config;
1444 tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
1447 tdma->dma_dev.device_tx_status = tegra_dma_tx_status; 1445 tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
1448 tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending; 1446 tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
1449 1447
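
The tegra driver also starts reporting residue granularity. The XXX comment explains the conservative choice; the sketch below spells out the three levels the core distinguishes (the numeric values are believed to match enum dma_residue_granularity, but treat them as an assumption):

#include <stdio.h>

enum residue_granularity {
        RESIDUE_DESCRIPTOR = 0, /* residue only meaningful once the descriptor completes */
        RESIDUE_SEGMENT    = 1, /* residue updated after each completed segment */
        RESIDUE_BURST      = 2, /* residue updated after each completed burst */
};

int main(void)
{
        /* tegra advertises SEGMENT: it only recomputes residue per sg
         * segment, even though the XXX note says the hw could do BURST */
        enum residue_granularity g = RESIDUE_SEGMENT;

        printf("advertised granularity = %d\n", g);
        return 0;
}
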
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 2407ccf1a64b..c4c3d93fdd1b 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -561,8 +561,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
561 return &td_desc->txd; 561 return &td_desc->txd;
562} 562}
563 563
564static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 564static int td_terminate_all(struct dma_chan *chan)
565 unsigned long arg)
566{ 565{
567 struct timb_dma_chan *td_chan = 566 struct timb_dma_chan *td_chan =
568 container_of(chan, struct timb_dma_chan, chan); 567 container_of(chan, struct timb_dma_chan, chan);
@@ -570,9 +569,6 @@ static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
570 569
571 dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); 570 dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
572 571
573 if (cmd != DMA_TERMINATE_ALL)
574 return -ENXIO;
575
576 /* first the easy part, put the queue into the free list */ 572 /* first the easy part, put the queue into the free list */
577 spin_lock_bh(&td_chan->lock); 573 spin_lock_bh(&td_chan->lock);
578 list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue, 574 list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
@@ -697,7 +693,7 @@ static int td_probe(struct platform_device *pdev)
697 dma_cap_set(DMA_SLAVE, td->dma.cap_mask); 693 dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
698 dma_cap_set(DMA_PRIVATE, td->dma.cap_mask); 694 dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
699 td->dma.device_prep_slave_sg = td_prep_slave_sg; 695 td->dma.device_prep_slave_sg = td_prep_slave_sg;
700 td->dma.device_control = td_control; 696 td->dma.device_terminate_all = td_terminate_all;
701 697
702 td->dma.dev = &pdev->dev; 698 td->dma.dev = &pdev->dev;
703 699
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 0659ec9c4488..8849318b32b7 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -901,17 +901,12 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
901 return &first->txd; 901 return &first->txd;
902} 902}
903 903
904static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 904static int txx9dmac_terminate_all(struct dma_chan *chan)
905 unsigned long arg)
906{ 905{
907 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); 906 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
908 struct txx9dmac_desc *desc, *_desc; 907 struct txx9dmac_desc *desc, *_desc;
909 LIST_HEAD(list); 908 LIST_HEAD(list);
910 909
911 /* Only supports DMA_TERMINATE_ALL */
912 if (cmd != DMA_TERMINATE_ALL)
913 return -EINVAL;
914
915 dev_vdbg(chan2dev(chan), "terminate_all\n"); 910 dev_vdbg(chan2dev(chan), "terminate_all\n");
916 spin_lock_bh(&dc->lock); 911 spin_lock_bh(&dc->lock);
917 912
@@ -1109,7 +1104,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
1109 dc->dma.dev = &pdev->dev; 1104 dc->dma.dev = &pdev->dev;
1110 dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources; 1105 dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
1111 dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources; 1106 dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
1112 dc->dma.device_control = txx9dmac_control; 1107 dc->dma.device_terminate_all = txx9dmac_terminate_all;
1113 dc->dma.device_tx_status = txx9dmac_tx_status; 1108 dc->dma.device_tx_status = txx9dmac_tx_status;
1114 dc->dma.device_issue_pending = txx9dmac_issue_pending; 1109 dc->dma.device_issue_pending = txx9dmac_issue_pending;
1115 if (pdata && pdata->memcpy_chan == ch) { 1110 if (pdata && pdata->memcpy_chan == ch) {
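
timb_dma above, txx9dmac here, and xilinx_vdma below all follow the degenerate case of the conversion: they only ever implemented DMA_TERMINATE_ALL, so the cmd filter vanishes and the worker is registered directly as device_terminate_all. A before/after sketch (old_control and do_terminate are illustrative names):

#include <errno.h>
#include <stdio.h>

enum dma_ctrl_cmd { DMA_TERMINATE_ALL, DMA_PAUSE };

static int do_terminate(void) { puts("terminated"); return 0; }

/* before: the lone hook had to filter out every other command */
static int old_control(enum dma_ctrl_cmd cmd)
{
        if (cmd != DMA_TERMINATE_ALL)
                return -EINVAL;
        return do_terminate();
}

/* after: do_terminate() is registered as device_terminate_all directly,
 * and unimplemented operations are left NULL for the core to reject */

int main(void)
{
        printf("old pause     -> %d\n", old_control(DMA_PAUSE));
        printf("old terminate -> %d\n", old_control(DMA_TERMINATE_ALL));
        return 0;
}

Unsupported operations are simply left NULL; the core wrappers can then fail them in one place instead of each driver picking its own errno.
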
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 4a3a8f3137b3..bdd2a5dd7220 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -1001,13 +1001,17 @@ error:
1001 * xilinx_vdma_terminate_all - Halt the channel and free descriptors 1001 * xilinx_vdma_terminate_all - Halt the channel and free descriptors
1002 * @chan: Driver specific VDMA Channel pointer 1002 * @chan: Driver specific VDMA Channel pointer
1003 */ 1003 */
1004static void xilinx_vdma_terminate_all(struct xilinx_vdma_chan *chan) 1004static int xilinx_vdma_terminate_all(struct dma_chan *dchan)
1005{ 1005{
1006 struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
1007
1006 /* Halt the DMA engine */ 1008 /* Halt the DMA engine */
1007 xilinx_vdma_halt(chan); 1009 xilinx_vdma_halt(chan);
1008 1010
1009 /* Remove and free all of the descriptors in the lists */ 1011 /* Remove and free all of the descriptors in the lists */
1010 xilinx_vdma_free_descriptors(chan); 1012 xilinx_vdma_free_descriptors(chan);
1013
1014 return 0;
1011} 1015}
1012 1016
1013/** 1017/**
@@ -1075,27 +1079,6 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
1075} 1079}
1076EXPORT_SYMBOL(xilinx_vdma_channel_set_config); 1080EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
1077 1081
1078/**
1079 * xilinx_vdma_device_control - Configure DMA channel of the device
1080 * @dchan: DMA Channel pointer
1081 * @cmd: DMA control command
1082 * @arg: Channel configuration
1083 *
1084 * Return: '0' on success and failure value on error
1085 */
1086static int xilinx_vdma_device_control(struct dma_chan *dchan,
1087 enum dma_ctrl_cmd cmd, unsigned long arg)
1088{
1089 struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
1090
1091 if (cmd != DMA_TERMINATE_ALL)
1092 return -ENXIO;
1093
1094 xilinx_vdma_terminate_all(chan);
1095
1096 return 0;
1097}
1098
1099/* ----------------------------------------------------------------------------- 1082/* -----------------------------------------------------------------------------
1100 * Probe and remove 1083 * Probe and remove
1101 */ 1084 */
@@ -1300,7 +1283,7 @@ static int xilinx_vdma_probe(struct platform_device *pdev)
1300 xilinx_vdma_free_chan_resources; 1283 xilinx_vdma_free_chan_resources;
1301 xdev->common.device_prep_interleaved_dma = 1284 xdev->common.device_prep_interleaved_dma =
1302 xilinx_vdma_dma_prep_interleaved; 1285 xilinx_vdma_dma_prep_interleaved;
1303 xdev->common.device_control = xilinx_vdma_device_control; 1286 xdev->common.device_terminate_all = xilinx_vdma_terminate_all;
1304 xdev->common.device_tx_status = xilinx_vdma_tx_status; 1287 xdev->common.device_tx_status = xilinx_vdma_tx_status;
1305 xdev->common.device_issue_pending = xilinx_vdma_issue_pending; 1288 xdev->common.device_issue_pending = xilinx_vdma_issue_pending;
1306 1289
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 17638d7cf5c2..5907c1718f8c 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2174,14 +2174,20 @@ static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
2174 2174
2175static inline void decode_bus_error(int node_id, struct mce *m) 2175static inline void decode_bus_error(int node_id, struct mce *m)
2176{ 2176{
2177 struct mem_ctl_info *mci = mcis[node_id]; 2177 struct mem_ctl_info *mci;
2178 struct amd64_pvt *pvt = mci->pvt_info; 2178 struct amd64_pvt *pvt;
2179 u8 ecc_type = (m->status >> 45) & 0x3; 2179 u8 ecc_type = (m->status >> 45) & 0x3;
2180 u8 xec = XEC(m->status, 0x1f); 2180 u8 xec = XEC(m->status, 0x1f);
2181 u16 ec = EC(m->status); 2181 u16 ec = EC(m->status);
2182 u64 sys_addr; 2182 u64 sys_addr;
2183 struct err_info err; 2183 struct err_info err;
2184 2184
2185 mci = edac_mc_find(node_id);
2186 if (!mci)
2187 return;
2188
2189 pvt = mci->pvt_info;
2190
2185 /* Bail out early if this was an 'observed' error */ 2191 /* Bail out early if this was an 'observed' error */
2186 if (PP(ec) == NBSL_PP_OBS) 2192 if (PP(ec) == NBSL_PP_OBS)
2187 return; 2193 return;
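
The amd64_edac fix replaces an unchecked mcis[node_id] array dereference with an edac_mc_find() lookup followed by a NULL test, so a bus-error MCE for a node without a registered controller is ignored instead of dereferencing a NULL pointer. A minimal model of the bail-before-dereference pattern (find_mci is a hypothetical stand-in for edac_mc_find):

#include <stdio.h>

struct mem_ctl_info { void *pvt_info; };

/* hypothetical stand-in for edac_mc_find() */
static struct mem_ctl_info *find_mci(int node_id)
{
        (void)node_id;
        return NULL;            /* simulate: no controller on this node */
}

static void decode_bus_error_model(int node_id)
{
        struct mem_ctl_info *mci = find_mci(node_id);

        if (!mci)               /* bail before touching mci->pvt_info */
                return;
        printf("pvt = %p\n", mci->pvt_info);
}

int main(void)
{
        decode_bus_error_model(0);      /* silently ignored, no crash */
        return 0;
}
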
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 63aa6730e89e..1acf57ba4c86 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -2447,7 +2447,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2447 rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_ibridge_table); 2447 rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_ibridge_table);
2448 type = IVY_BRIDGE; 2448 type = IVY_BRIDGE;
2449 break; 2449 break;
2450 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA: 2450 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
2451 rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_sbridge_table); 2451 rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_sbridge_table);
2452 type = SANDY_BRIDGE; 2452 type = SANDY_BRIDGE;
2453 break; 2453 break;
@@ -2460,8 +2460,11 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2460 type = BROADWELL; 2460 type = BROADWELL;
2461 break; 2461 break;
2462 } 2462 }
2463 if (unlikely(rc < 0)) 2463 if (unlikely(rc < 0)) {
2464 edac_dbg(0, "couldn't get all devices for 0x%x\n", pdev->device);
2464 goto fail0; 2465 goto fail0;
2466 }
2467
2465 mc = 0; 2468 mc = 0;
2466 2469
2467 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { 2470 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
@@ -2474,7 +2477,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2474 goto fail1; 2477 goto fail1;
2475 } 2478 }
2476 2479
2477 sbridge_printk(KERN_INFO, "Driver loaded.\n"); 2480 sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);
2478 2481
2479 mutex_unlock(&sbridge_edac_lock); 2482 mutex_unlock(&sbridge_edac_lock);
2480 return 0; 2483 return 0;
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index eb6935c8ad94..d6a09b9cd8cc 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -1246,14 +1246,14 @@ static const u32 model_textual_descriptor[] = {
1246 1246
1247static struct fw_descriptor vendor_id_descriptor = { 1247static struct fw_descriptor vendor_id_descriptor = {
1248 .length = ARRAY_SIZE(vendor_textual_descriptor), 1248 .length = ARRAY_SIZE(vendor_textual_descriptor),
1249 .immediate = 0x03d00d1e, 1249 .immediate = 0x03001f11,
1250 .key = 0x81000000, 1250 .key = 0x81000000,
1251 .data = vendor_textual_descriptor, 1251 .data = vendor_textual_descriptor,
1252}; 1252};
1253 1253
1254static struct fw_descriptor model_id_descriptor = { 1254static struct fw_descriptor model_id_descriptor = {
1255 .length = ARRAY_SIZE(model_textual_descriptor), 1255 .length = ARRAY_SIZE(model_textual_descriptor),
1256 .immediate = 0x17000001, 1256 .immediate = 0x17023901,
1257 .key = 0x81000000, 1257 .key = 0x81000000,
1258 .data = model_textual_descriptor, 1258 .data = model_textual_descriptor,
1259}; 1259};
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index aff9018d0658..f51d376d10ba 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -718,11 +718,6 @@ static inline unsigned int ar_next_buffer_index(unsigned int index)
718 return (index + 1) % AR_BUFFERS; 718 return (index + 1) % AR_BUFFERS;
719} 719}
720 720
721static inline unsigned int ar_prev_buffer_index(unsigned int index)
722{
723 return (index - 1 + AR_BUFFERS) % AR_BUFFERS;
724}
725
726static inline unsigned int ar_first_buffer_index(struct ar_context *ctx) 721static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
727{ 722{
728 return ar_next_buffer_index(ctx->last_buffer_index); 723 return ar_next_buffer_index(ctx->last_buffer_index);
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 64ac8f8f5098..c22606fe3d44 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1463,17 +1463,6 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
1463 struct sbp2_command_orb *orb; 1463 struct sbp2_command_orb *orb;
1464 int generation, retval = SCSI_MLQUEUE_HOST_BUSY; 1464 int generation, retval = SCSI_MLQUEUE_HOST_BUSY;
1465 1465
1466 /*
1467 * Bidirectional commands are not yet implemented, and unknown
1468 * transfer direction not handled.
1469 */
1470 if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
1471 dev_err(lu_dev(lu), "cannot handle bidirectional command\n");
1472 cmd->result = DID_ERROR << 16;
1473 cmd->scsi_done(cmd);
1474 return 0;
1475 }
1476
1477 orb = kzalloc(sizeof(*orb), GFP_ATOMIC); 1466 orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
1478 if (orb == NULL) 1467 if (orb == NULL)
1479 return SCSI_MLQUEUE_HOST_BUSY; 1468 return SCSI_MLQUEUE_HOST_BUSY;
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index af5d63c7cc53..2fe195002021 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -75,29 +75,25 @@ efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
75 unsigned long key; 75 unsigned long key;
76 u32 desc_version; 76 u32 desc_version;
77 77
78 *map_size = 0; 78 *map_size = sizeof(*m) * 32;
79 *desc_size = 0; 79again:
80 key = 0;
81 status = efi_call_early(get_memory_map, map_size, NULL,
82 &key, desc_size, &desc_version);
83 if (status != EFI_BUFFER_TOO_SMALL)
84 return EFI_LOAD_ERROR;
85
86 /* 80 /*
87 * Add an additional efi_memory_desc_t because we're doing an 81 * Add an additional efi_memory_desc_t because we're doing an
88 * allocation which may be in a new descriptor region. 82 * allocation which may be in a new descriptor region.
89 */ 83 */
90 *map_size += *desc_size; 84 *map_size += sizeof(*m);
91 status = efi_call_early(allocate_pool, EFI_LOADER_DATA, 85 status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
92 *map_size, (void **)&m); 86 *map_size, (void **)&m);
93 if (status != EFI_SUCCESS) 87 if (status != EFI_SUCCESS)
94 goto fail; 88 goto fail;
95 89
90 *desc_size = 0;
91 key = 0;
96 status = efi_call_early(get_memory_map, map_size, m, 92 status = efi_call_early(get_memory_map, map_size, m,
97 &key, desc_size, &desc_version); 93 &key, desc_size, &desc_version);
98 if (status == EFI_BUFFER_TOO_SMALL) { 94 if (status == EFI_BUFFER_TOO_SMALL) {
99 efi_call_early(free_pool, m); 95 efi_call_early(free_pool, m);
100 return EFI_LOAD_ERROR; 96 goto again;
101 } 97 }
102 98
103 if (status != EFI_SUCCESS) 99 if (status != EFI_SUCCESS)
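
The EFI stub change replaces probe-then-allocate (which assumed the first get_memory_map call reliably returned EFI_BUFFER_TOO_SMALL with the needed size) with a grow-and-retry loop: guess space for 32 descriptors, always add room for one more because the allocation itself may create a new descriptor region, and loop if the firmware still reports the buffer as too small. A userspace model of the loop (the EFI boot service is mocked):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EFI_SUCCESS          0
#define EFI_BUFFER_TOO_SMALL 5

static size_t fw_map_size = 300;        /* what the firmware would report */

static int get_memory_map(size_t *size, void *buf)   /* mock boot service */
{
        if (!buf || *size < fw_map_size) {
                *size = fw_map_size;    /* firmware tells us the needed size */
                return EFI_BUFFER_TOO_SMALL;
        }
        memset(buf, 0, fw_map_size);
        return EFI_SUCCESS;
}

int main(void)
{
        size_t map_size = 32 * 8;       /* initial guess: ~32 descriptors */
        void *m;
again:
        map_size += 8;                  /* room for one extra descriptor:
                                         * our allocation may create one */
        m = malloc(map_size);
        if (!m)
                return 1;
        if (get_memory_map(&map_size, m) == EFI_BUFFER_TOO_SMALL) {
                free(m);                /* map grew under us: retry, bigger */
                goto again;
        }
        printf("got memory map, %zu bytes\n", map_size);
        free(m);
        return 0;
}
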
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index 472fb5b8779f..9cdbc0c9cb2d 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -26,9 +26,12 @@ struct tps65912_gpio_data {
26 struct gpio_chip gpio_chip; 26 struct gpio_chip gpio_chip;
27}; 27};
28 28
29#define to_tgd(gc) container_of(gc, struct tps65912_gpio_data, gpio_chip)
30
29static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset) 31static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
30{ 32{
31 struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); 33 struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
34 struct tps65912 *tps65912 = tps65912_gpio->tps65912;
32 int val; 35 int val;
33 36
34 val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset); 37 val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset);
@@ -42,7 +45,8 @@ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
42static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset, 45static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
43 int value) 46 int value)
44{ 47{
45 struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); 48 struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
49 struct tps65912 *tps65912 = tps65912_gpio->tps65912;
46 50
47 if (value) 51 if (value)
48 tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset, 52 tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
@@ -55,7 +59,8 @@ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
55static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset, 59static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
56 int value) 60 int value)
57{ 61{
58 struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); 62 struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
63 struct tps65912 *tps65912 = tps65912_gpio->tps65912;
59 64
60 /* Set the initial value */ 65 /* Set the initial value */
61 tps65912_gpio_set(gc, offset, value); 66 tps65912_gpio_set(gc, offset, value);
@@ -66,7 +71,8 @@ static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
66 71
67static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset) 72static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset)
68{ 73{
69 struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); 74 struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
75 struct tps65912 *tps65912 = tps65912_gpio->tps65912;
70 76
71 return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset, 77 return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
72 GPIO_CFG_MASK); 78 GPIO_CFG_MASK);
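
The tps65912 bug was a container_of() against the wrong containing type: gpio_chip is embedded in tps65912_gpio_data, not in struct tps65912, so the old code computed a garbage pointer that happened to compile. A compilable illustration of the corrected two-step recovery (the *_model types are made-up stand-ins):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct gpio_chip { int ngpio; };

struct tps65912_model { int regs; };            /* stand-in for struct tps65912 */

struct tps65912_gpio_data_model {               /* what gpio_chip really lives in */
        struct tps65912_model *tps65912;
        struct gpio_chip gpio_chip;
};

int main(void)
{
        struct tps65912_model pmic = { .regs = 42 };
        struct tps65912_gpio_data_model gd = {
                .tps65912  = &pmic,
                .gpio_chip = { .ngpio = 5 },
        };
        struct gpio_chip *gc = &gd.gpio_chip;

        /* correct: step back to the structure gc is actually embedded in,
         * then follow the pointer; container_of() against the wrong type
         * (as the old code did) yields a bogus pointer with no warning */
        struct tps65912_gpio_data_model *owner =
                container_of(gc, struct tps65912_gpio_data_model, gpio_chip);

        printf("regs = %d\n", owner->tps65912->regs);
        return 0;
}
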
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 8cad8e400b44..4650bf830d6b 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -46,12 +46,13 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
46 46
47 ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags); 47 ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
48 if (ret < 0) { 48 if (ret < 0) {
49 /* We've found the gpio chip, but the translation failed. 49 /* We've found a gpio chip, but the translation failed.
50 * Return true to stop looking and return the translation 50 * Store translation error in out_gpio.
51 * error via out_gpio 51 * Return false to keep looking, as more than one gpio chip
52 * could be registered per of-node.
52 */ 53 */
53 gg_data->out_gpio = ERR_PTR(ret); 54 gg_data->out_gpio = ERR_PTR(ret);
54 return true; 55 return false;
55 } 56 }
56 57
57 gg_data->out_gpio = gpiochip_get_desc(gc, ret); 58 gg_data->out_gpio = gpiochip_get_desc(gc, ret);
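
gpiolib-of now treats a translation failure as "keep looking" rather than "stop", because more than one gpio chip can be registered against a single OF node; the error is parked in out_gpio and only surfaces if no later chip matches. A small model of that scan policy (xlate is an invented translator, not the gpiolib API):

#include <errno.h>
#include <stdio.h>

static int xlate(int chip)      /* invented per-chip translator */
{
        return chip == 2 ? 7 : -EINVAL;   /* only chip 2 owns this spec */
}

int main(void)
{
        int err = 0, found = -1;

        for (int chip = 0; chip < 4; chip++) {
                int ret = xlate(chip);

                if (ret < 0) {
                        err = ret;      /* remember, but keep scanning:
                                         * several chips may share a node */
                        continue;
                }
                found = ret;
                break;
        }
        if (found >= 0)
                printf("desc %d\n", found);
        else
                printf("no chip matched, last error %d\n", err);
        return 0;
}
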
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index b3589d0e39b9..910ff8ab9c9c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -62,12 +62,18 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
62 return KFD_MQD_TYPE_CP; 62 return KFD_MQD_TYPE_CP;
63} 63}
64 64
65static inline unsigned int get_first_pipe(struct device_queue_manager *dqm) 65unsigned int get_first_pipe(struct device_queue_manager *dqm)
66{ 66{
67 BUG_ON(!dqm); 67 BUG_ON(!dqm || !dqm->dev);
68 return dqm->dev->shared_resources.first_compute_pipe; 68 return dqm->dev->shared_resources.first_compute_pipe;
69} 69}
70 70
71unsigned int get_pipes_num(struct device_queue_manager *dqm)
72{
73 BUG_ON(!dqm || !dqm->dev);
74 return dqm->dev->shared_resources.compute_pipe_count;
75}
76
71static inline unsigned int get_pipes_num_cpsch(void) 77static inline unsigned int get_pipes_num_cpsch(void)
72{ 78{
73 return PIPE_PER_ME_CP_SCHEDULING; 79 return PIPE_PER_ME_CP_SCHEDULING;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index d64f86cda34f..488f51d19427 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -163,6 +163,8 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
163 struct qcm_process_device *qpd); 163 struct qcm_process_device *qpd);
164int init_pipelines(struct device_queue_manager *dqm, 164int init_pipelines(struct device_queue_manager *dqm,
165 unsigned int pipes_num, unsigned int first_pipe); 165 unsigned int pipes_num, unsigned int first_pipe);
166unsigned int get_first_pipe(struct device_queue_manager *dqm);
167unsigned int get_pipes_num(struct device_queue_manager *dqm);
166 168
167extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) 169extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
168{ 170{
@@ -175,10 +177,4 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
175 return (pdd->lds_base >> 60) & 0x0E; 177 return (pdd->lds_base >> 60) & 0x0E;
176} 178}
177 179
178extern inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
179{
180 BUG_ON(!dqm || !dqm->dev);
181 return dqm->dev->shared_resources.compute_pipe_count;
182}
183
184#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */ 180#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 6b072466e2a6..5469efe0523e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -131,5 +131,5 @@ static int register_process_cik(struct device_queue_manager *dqm,
131 131
132static int initialize_cpsch_cik(struct device_queue_manager *dqm) 132static int initialize_cpsch_cik(struct device_queue_manager *dqm)
133{ 133{
134 return init_pipelines(dqm, get_pipes_num(dqm), 0); 134 return init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
135} 135}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 0409b907de5d..b3e3068c6ec0 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -153,7 +153,7 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
153 (adj->crtc_hdisplay - 1) | 153 (adj->crtc_hdisplay - 1) |
154 ((adj->crtc_vdisplay - 1) << 16)); 154 ((adj->crtc_vdisplay - 1) << 16));
155 155
156 cfg = ATMEL_HLCDC_CLKPOL; 156 cfg = 0;
157 157
158 prate = clk_get_rate(crtc->dc->hlcdc->sys_clk); 158 prate = clk_get_rate(crtc->dc->hlcdc->sys_clk);
159 mode_rate = mode->crtc_clock * 1000; 159 mode_rate = mode->crtc_clock * 1000;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 7320a6c6613f..c1cb17493e0d 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -311,8 +311,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
311 311
312 pm_runtime_enable(dev->dev); 312 pm_runtime_enable(dev->dev);
313 313
314 pm_runtime_put_sync(dev->dev);
315
316 ret = atmel_hlcdc_dc_modeset_init(dev); 314 ret = atmel_hlcdc_dc_modeset_init(dev);
317 if (ret < 0) { 315 if (ret < 0) {
318 dev_err(dev->dev, "failed to initialize mode setting\n"); 316 dev_err(dev->dev, "failed to initialize mode setting\n");
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
index 063d2a7b941f..e79bd9ba474b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
@@ -311,7 +311,8 @@ int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
311 311
312 /* Disable the layer */ 312 /* Disable the layer */
313 regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR, 313 regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
314 ATMEL_HLCDC_LAYER_RST); 314 ATMEL_HLCDC_LAYER_RST | ATMEL_HLCDC_LAYER_A2Q |
315 ATMEL_HLCDC_LAYER_UPDATE);
315 316
316 /* Clear all pending interrupts */ 317 /* Clear all pending interrupts */
317 regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr); 318 regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 6b00173d1be4..6b6b07ff720b 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2127,7 +2127,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
2127 DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id); 2127 DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
2128 2128
2129 mutex_lock(&dev->mode_config.mutex); 2129 mutex_lock(&dev->mode_config.mutex);
2130 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
2131 2130
2132 connector = drm_connector_find(dev, out_resp->connector_id); 2131 connector = drm_connector_find(dev, out_resp->connector_id);
2133 if (!connector) { 2132 if (!connector) {
@@ -2157,6 +2156,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
2157 out_resp->mm_height = connector->display_info.height_mm; 2156 out_resp->mm_height = connector->display_info.height_mm;
2158 out_resp->subpixel = connector->display_info.subpixel_order; 2157 out_resp->subpixel = connector->display_info.subpixel_order;
2159 out_resp->connection = connector->status; 2158 out_resp->connection = connector->status;
2159
2160 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
2160 encoder = drm_connector_get_encoder(connector); 2161 encoder = drm_connector_get_encoder(connector);
2161 if (encoder) 2162 if (encoder)
2162 out_resp->encoder_id = encoder->base.id; 2163 out_resp->encoder_id = encoder->base.id;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f2a825e39646..8727086cf48c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2114,6 +2114,9 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
2114 * number comparisons on buffer last_read|write_seqno. It also allows an 2114 * number comparisons on buffer last_read|write_seqno. It also allows an
2115 * emission time to be associated with the request for tracking how far ahead 2115 * emission time to be associated with the request for tracking how far ahead
2116 * of the GPU the submission is. 2116 * of the GPU the submission is.
2117 *
2118 * The requests are reference counted, so upon creation they should have an
2119 * initial reference taken using kref_init
2117 */ 2120 */
2118struct drm_i915_gem_request { 2121struct drm_i915_gem_request {
2119 struct kref ref; 2122 struct kref ref;
@@ -2137,7 +2140,16 @@ struct drm_i915_gem_request {
2137 /** Position in the ringbuffer of the end of the whole request */ 2140 /** Position in the ringbuffer of the end of the whole request */
2138 u32 tail; 2141 u32 tail;
2139 2142
2140 /** Context related to this request */ 2143 /**
2144 * Context related to this request
2145 * Contexts are refcounted, so when this request is associated with a
2146 * context, we must increment the context's refcount, to guarantee that
2147 * it persists while any request is linked to it. Requests themselves
2148 * are also refcounted, so the request will only be freed when the last
2149 * reference to it is dismissed, and the code in
2150 * i915_gem_request_free() will then decrement the refcount on the
2151 * context.
2152 */
2141 struct intel_context *ctx; 2153 struct intel_context *ctx;
2142 2154
2143 /** Batch buffer related to this request if any */ 2155 /** Batch buffer related to this request if any */
@@ -2374,6 +2386,7 @@ struct drm_i915_cmd_table {
2374 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) 2386 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
2375#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ 2387#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
2376 ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ 2388 ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
2389 (INTEL_DEVID(dev) & 0xf) == 0xb || \
2377 (INTEL_DEVID(dev) & 0xf) == 0xe)) 2390 (INTEL_DEVID(dev) & 0xf) == 0xe))
2378#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ 2391#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
2379 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2392 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
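
The i915 hunks in this area move request lifetime fully onto kref: creation takes the initial reference (kref_init), users take extra references, and the context reference is dropped exactly once in the request's release path instead of at scattered call sites (see the i915_gem.c and intel_lrc.c hunks below). A toy kref to show the shape (simplified, not the kernel's struct kref):

#include <stdio.h>
#include <stdlib.h>

struct kref { int count; };                     /* toy, not kernel kref */

static void kref_init(struct kref *k) { k->count = 1; }
static void kref_get(struct kref *k)  { k->count++; }
static void kref_put(struct kref *k, void (*release)(struct kref *))
{
        if (--k->count == 0)
                release(k);
}

struct request_model {
        struct kref ref;                        /* first member on purpose */
        /* the context ref is dropped in the release path, so teardown
         * lives in exactly one place */
};

static void request_free(struct kref *k)
{
        struct request_model *req = (struct request_model *)k;

        printf("last ref gone: drop ctx ref, free %p\n", (void *)req);
        free(req);
}

int main(void)
{
        struct request_model *req = malloc(sizeof(*req));

        if (!req)
                return 1;
        kref_init(&req->ref);                   /* creator's reference */
        kref_get(&req->ref);                    /* e.g. execlist queue's ref */
        kref_put(&req->ref, request_free);      /* queue done: still alive */
        kref_put(&req->ref, request_free);      /* creator done: freed here */
        return 0;
}
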
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c26d36cc4b31..e5daad5f75fb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2659,8 +2659,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2659 if (submit_req->ctx != ring->default_context) 2659 if (submit_req->ctx != ring->default_context)
2660 intel_lr_context_unpin(ring, submit_req->ctx); 2660 intel_lr_context_unpin(ring, submit_req->ctx);
2661 2661
2662 i915_gem_context_unreference(submit_req->ctx); 2662 i915_gem_request_unreference(submit_req);
2663 kfree(submit_req);
2664 } 2663 }
2665 2664
2666 /* 2665 /*
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index a2045848bd1a..9c6f93ec886b 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -485,10 +485,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
485 stolen_offset, gtt_offset, size); 485 stolen_offset, gtt_offset, size);
486 486
487 /* KISS and expect everything to be page-aligned */ 487 /* KISS and expect everything to be page-aligned */
488 BUG_ON(stolen_offset & 4095); 488 if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
489 BUG_ON(size & 4095); 489 WARN_ON(stolen_offset & 4095))
490
491 if (WARN_ON(size == 0))
492 return NULL; 490 return NULL;
493 491
494 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); 492 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 7a24bd1a51f6..6377b22269ad 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -335,9 +335,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
335 return -EINVAL; 335 return -EINVAL;
336 } 336 }
337 337
338 mutex_lock(&dev->struct_mutex);
338 if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) { 339 if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
339 drm_gem_object_unreference_unlocked(&obj->base); 340 ret = -EBUSY;
340 return -EBUSY; 341 goto err;
341 } 342 }
342 343
343 if (args->tiling_mode == I915_TILING_NONE) { 344 if (args->tiling_mode == I915_TILING_NONE) {
@@ -369,7 +370,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
369 } 370 }
370 } 371 }
371 372
372 mutex_lock(&dev->struct_mutex);
373 if (args->tiling_mode != obj->tiling_mode || 373 if (args->tiling_mode != obj->tiling_mode ||
374 args->stride != obj->stride) { 374 args->stride != obj->stride) {
375 /* We need to rebind the object if its current allocation 375 /* We need to rebind the object if its current allocation
@@ -424,6 +424,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
424 obj->bit_17 = NULL; 424 obj->bit_17 = NULL;
425 } 425 }
426 426
427err:
427 drm_gem_object_unreference(&obj->base); 428 drm_gem_object_unreference(&obj->base);
428 mutex_unlock(&dev->struct_mutex); 429 mutex_unlock(&dev->struct_mutex);
429 430
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4145d95902f5..ede5bbbd8a08 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1892,6 +1892,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1892 u32 iir, gt_iir, pm_iir; 1892 u32 iir, gt_iir, pm_iir;
1893 irqreturn_t ret = IRQ_NONE; 1893 irqreturn_t ret = IRQ_NONE;
1894 1894
1895 if (!intel_irqs_enabled(dev_priv))
1896 return IRQ_NONE;
1897
1895 while (true) { 1898 while (true) {
1896 /* Find, clear, then process each source of interrupt */ 1899 /* Find, clear, then process each source of interrupt */
1897 1900
@@ -1936,6 +1939,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1936 u32 master_ctl, iir; 1939 u32 master_ctl, iir;
1937 irqreturn_t ret = IRQ_NONE; 1940 irqreturn_t ret = IRQ_NONE;
1938 1941
1942 if (!intel_irqs_enabled(dev_priv))
1943 return IRQ_NONE;
1944
1939 for (;;) { 1945 for (;;) {
1940 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1946 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1941 iir = I915_READ(VLV_IIR); 1947 iir = I915_READ(VLV_IIR);
@@ -2208,6 +2214,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2208 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2214 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2209 irqreturn_t ret = IRQ_NONE; 2215 irqreturn_t ret = IRQ_NONE;
2210 2216
2217 if (!intel_irqs_enabled(dev_priv))
2218 return IRQ_NONE;
2219
2211 /* We get interrupts on unclaimed registers, so check for this before we 2220 /* We get interrupts on unclaimed registers, so check for this before we
2212 * do any I915_{READ,WRITE}. */ 2221 * do any I915_{READ,WRITE}. */
2213 intel_uncore_check_errors(dev); 2222 intel_uncore_check_errors(dev);
@@ -2279,6 +2288,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2279 enum pipe pipe; 2288 enum pipe pipe;
2280 u32 aux_mask = GEN8_AUX_CHANNEL_A; 2289 u32 aux_mask = GEN8_AUX_CHANNEL_A;
2281 2290
2291 if (!intel_irqs_enabled(dev_priv))
2292 return IRQ_NONE;
2293
2282 if (IS_GEN9(dev)) 2294 if (IS_GEN9(dev))
2283 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 2295 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2284 GEN9_AUX_CHANNEL_D; 2296 GEN9_AUX_CHANNEL_D;
@@ -3771,6 +3783,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3771 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3783 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3772 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3784 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3773 3785
3786 if (!intel_irqs_enabled(dev_priv))
3787 return IRQ_NONE;
3788
3774 iir = I915_READ16(IIR); 3789 iir = I915_READ16(IIR);
3775 if (iir == 0) 3790 if (iir == 0)
3776 return IRQ_NONE; 3791 return IRQ_NONE;
@@ -3951,6 +3966,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3951 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3966 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3952 int pipe, ret = IRQ_NONE; 3967 int pipe, ret = IRQ_NONE;
3953 3968
3969 if (!intel_irqs_enabled(dev_priv))
3970 return IRQ_NONE;
3971
3954 iir = I915_READ(IIR); 3972 iir = I915_READ(IIR);
3955 do { 3973 do {
3956 bool irq_received = (iir & ~flip_mask) != 0; 3974 bool irq_received = (iir & ~flip_mask) != 0;
@@ -4171,6 +4189,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4171 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4189 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4172 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4190 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4173 4191
4192 if (!intel_irqs_enabled(dev_priv))
4193 return IRQ_NONE;
4194
4174 iir = I915_READ(IIR); 4195 iir = I915_READ(IIR);
4175 4196
4176 for (;;) { 4197 for (;;) {
@@ -4520,6 +4541,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4520{ 4541{
4521 dev_priv->dev->driver->irq_uninstall(dev_priv->dev); 4542 dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4522 dev_priv->pm.irqs_enabled = false; 4543 dev_priv->pm.irqs_enabled = false;
4544 synchronize_irq(dev_priv->dev->irq);
4523} 4545}
4524 4546
4525/** 4547/**
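
Every i915 top-level interrupt handler gains the same two-line prologue, and the disable path gains synchronize_irq(): together they guarantee that once pm.irqs_enabled is cleared and the synchronize returns, no handler is running or will touch the hardware, which matters with shared interrupt lines and runtime PM. A minimal single-threaded model of the guard:

#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled;       /* models dev_priv->pm.irqs_enabled */

static int irq_handler(void)
{
        if (!irqs_enabled)      /* the new prologue in every handler */
                return 0;       /* IRQ_NONE: don't touch the hardware */
        /* ... read IIR, dispatch sources ... */
        return 1;               /* IRQ_HANDLED */
}

static void disable_interrupts(void)
{
        irqs_enabled = false;
        /* the kernel side also calls synchronize_irq() here, so any
         * handler already past the check finishes before we proceed */
}

int main(void)
{
        irqs_enabled = true;
        printf("enabled  -> %d\n", irq_handler());
        disable_interrupts();
        printf("disabled -> %d\n", irq_handler());
        return 0;
}
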
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3d220a67f865..e730789b53b7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2371,13 +2371,19 @@ intel_alloc_plane_obj(struct intel_crtc *crtc,
2371 struct drm_device *dev = crtc->base.dev; 2371 struct drm_device *dev = crtc->base.dev;
2372 struct drm_i915_gem_object *obj = NULL; 2372 struct drm_i915_gem_object *obj = NULL;
2373 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 2373 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2374 u32 base = plane_config->base; 2374 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2375 u32 size_aligned = round_up(plane_config->base + plane_config->size,
2376 PAGE_SIZE);
2377
2378 size_aligned -= base_aligned;
2375 2379
2376 if (plane_config->size == 0) 2380 if (plane_config->size == 0)
2377 return false; 2381 return false;
2378 2382
2379 obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base, 2383 obj = i915_gem_object_create_stolen_for_preallocated(dev,
2380 plane_config->size); 2384 base_aligned,
2385 base_aligned,
2386 size_aligned);
2381 if (!obj) 2387 if (!obj)
2382 return false; 2388 return false;
2383 2389
@@ -2725,10 +2731,19 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
2725 case DRM_FORMAT_XRGB8888: 2731 case DRM_FORMAT_XRGB8888:
2726 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888; 2732 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2727 break; 2733 break;
2734 case DRM_FORMAT_ARGB8888:
2735 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2736 plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
2737 break;
2728 case DRM_FORMAT_XBGR8888: 2738 case DRM_FORMAT_XBGR8888:
2729 plane_ctl |= PLANE_CTL_ORDER_RGBX; 2739 plane_ctl |= PLANE_CTL_ORDER_RGBX;
2730 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888; 2740 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2731 break; 2741 break;
2742 case DRM_FORMAT_ABGR8888:
2743 plane_ctl |= PLANE_CTL_ORDER_RGBX;
2744 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2745 plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
2746 break;
2732 case DRM_FORMAT_XRGB2101010: 2747 case DRM_FORMAT_XRGB2101010:
2733 plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010; 2748 plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
2734 break; 2749 break;
@@ -6627,7 +6642,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
6627 aligned_height = intel_fb_align_height(dev, fb->height, 6642 aligned_height = intel_fb_align_height(dev, fb->height,
6628 plane_config->tiling); 6643 plane_config->tiling);
6629 6644
6630 plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height); 6645 plane_config->size = fb->pitches[0] * aligned_height;
6631 6646
6632 DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 6647 DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
6633 pipe_name(pipe), plane, fb->width, fb->height, 6648 pipe_name(pipe), plane, fb->width, fb->height,
@@ -7664,7 +7679,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
7664 aligned_height = intel_fb_align_height(dev, fb->height, 7679 aligned_height = intel_fb_align_height(dev, fb->height,
7665 plane_config->tiling); 7680 plane_config->tiling);
7666 7681
7667 plane_config->size = ALIGN(fb->pitches[0] * aligned_height, PAGE_SIZE); 7682 plane_config->size = fb->pitches[0] * aligned_height;
7668 7683
7669 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 7684 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
7670 pipe_name(pipe), fb->width, fb->height, 7685 pipe_name(pipe), fb->width, fb->height,
@@ -7755,7 +7770,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
7755 aligned_height = intel_fb_align_height(dev, fb->height, 7770 aligned_height = intel_fb_align_height(dev, fb->height,
7756 plane_config->tiling); 7771 plane_config->tiling);
7757 7772
7758 plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height); 7773 plane_config->size = fb->pitches[0] * aligned_height;
7759 7774
7760 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 7775 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
7761 pipe_name(pipe), fb->width, fb->height, 7776 pipe_name(pipe), fb->width, fb->height,
@@ -8698,6 +8713,7 @@ retry:
8698 old->release_fb->funcs->destroy(old->release_fb); 8713 old->release_fb->funcs->destroy(old->release_fb);
8699 goto fail; 8714 goto fail;
8700 } 8715 }
8716 crtc->primary->crtc = crtc;
8701 8717
8702 /* let the connector get through one full cycle before testing */ 8718 /* let the connector get through one full cycle before testing */
8703 intel_wait_for_vblank(dev, intel_crtc->pipe); 8719 intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -12182,9 +12198,6 @@ intel_check_cursor_plane(struct drm_plane *plane,
12182 return -ENOMEM; 12198 return -ENOMEM;
12183 } 12199 }
12184 12200
12185 if (fb == crtc->cursor->fb)
12186 return 0;
12187
12188 /* we only need to pin inside GTT if cursor is non-phy */ 12201 /* we only need to pin inside GTT if cursor is non-phy */
12189 mutex_lock(&dev->struct_mutex); 12202 mutex_lock(&dev->struct_mutex);
12190 if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) { 12203 if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
@@ -13096,6 +13109,9 @@ static struct intel_quirk intel_quirks[] = {
13096 13109
13097 /* HP Chromebook 14 (Celeron 2955U) */ 13110 /* HP Chromebook 14 (Celeron 2955U) */
13098 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present }, 13111 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
13112
13113 /* Dell Chromebook 11 */
13114 { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
13099}; 13115};
13100 13116
13101static void intel_init_quirks(struct drm_device *dev) 13117static void intel_init_quirks(struct drm_device *dev)
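
The intel_display change stops trusting a pre-rounded plane_config->size: the raw base and size are kept, and the stolen-memory preallocation is computed by rounding the base down and the end up to page granularity. The arithmetic, runnable (the example framebuffer values are made up):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define round_down(x, a) ((x) & ~((a) - 1))
#define round_up(x, a)   (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long base = 0x12340, size = 0x2300;    /* made-up fb layout */
        unsigned long base_aligned = round_down(base, PAGE_SIZE);
        unsigned long size_aligned = round_up(base + size, PAGE_SIZE)
                                   - base_aligned;

        /* 0x12340 + 0x2300 -> preallocate 0x12000..0x15000 (0x3000 bytes) */
        printf("base 0x%lx size 0x%lx -> 0x%lx + 0x%lx\n",
               base, size, base_aligned, size_aligned);
        return 0;
}
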
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0f358c5999ec..e8d3da9f3373 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -503,18 +503,19 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 		 * If there isn't a request associated with this submission,
 		 * create one as a temporary holder.
 		 */
-		WARN(1, "execlist context submission without request");
 		request = kzalloc(sizeof(*request), GFP_KERNEL);
 		if (request == NULL)
 			return -ENOMEM;
 		request->ring = ring;
 		request->ctx = to;
+		kref_init(&request->ref);
+		request->uniq = dev_priv->request_uniq++;
+		i915_gem_context_reference(request->ctx);
 	} else {
+		i915_gem_request_reference(request);
 		WARN_ON(to != request->ctx);
 	}
 	request->tail = tail;
-	i915_gem_request_reference(request);
-	i915_gem_context_reference(request->ctx);
 
 	intel_runtime_pm_get(dev_priv);
 
@@ -731,7 +732,6 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
 		if (ctx_obj && (ctx != ring->default_context))
 			intel_lr_context_unpin(ring, ctx);
 		intel_runtime_pm_put(dev_priv);
-		i915_gem_context_unreference(ctx);
 		list_del(&req->execlist_link);
 		i915_gem_request_unreference(req);
 	}
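The queueing change above makes reference ownership explicit: a freshly allocated temporary request starts life with one reference (kref_init()), while an existing request gets an extra reference taken. A rough user-space model of that split, with C11 atomics standing in for kref (the names are ours, not the i915 API):

#include <stdatomic.h>
#include <stdlib.h>

struct req { atomic_int ref; };

static struct req *req_new(void)
{
	struct req *r = malloc(sizeof(*r));
	if (r)
		atomic_init(&r->ref, 1);	/* kref_init(): creator owns one ref */
	return r;
}

static void req_get(struct req *r)	/* i915_gem_request_reference() analogue */
{
	atomic_fetch_add(&r->ref, 1);
}

static void req_put(struct req *r)	/* release; free on last reference */
{
	if (atomic_fetch_sub(&r->ref, 1) == 1)
		free(r);
}

int main(void)
{
	struct req *r = req_new();	/* temporary-holder path */
	req_get(r);			/* existing-request path */
	req_put(r);
	req_put(r);			/* last put frees */
	return 0;
}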
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 5bf825dfaa09..8d74de82456e 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -178,6 +178,13 @@ radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 	switch (msg->request & ~DP_AUX_I2C_MOT) {
 	case DP_AUX_NATIVE_WRITE:
 	case DP_AUX_I2C_WRITE:
+		/* The atom implementation only supports writes with a max payload of
+		 * 12 bytes since it uses 4 bits for the total count (header + payload)
+		 * in the parameter space. The atom interface supports 16 byte
+		 * payloads for reads. The hw itself supports up to 16 bytes of payload.
+		 */
+		if (WARN_ON_ONCE(msg->size > 12))
+			return -E2BIG;
 		/* tx_size needs to be 4 even for bare address packets since the atom
 		 * table needs the info in tx_buf[3].
 		 */
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 7c9df1eac065..7fe7b749e182 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -731,7 +731,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 		dig_connector = radeon_connector->con_priv;
 		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
 		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
-			if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
+			if (radeon_audio != 0 &&
+			    drm_detect_monitor_audio(radeon_connector_edid(connector)) &&
+			    ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
 				return ATOM_ENCODER_MODE_DP_AUDIO;
 			return ATOM_ENCODER_MODE_DP;
 		} else if (radeon_audio != 0) {
@@ -747,7 +749,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 		}
 		break;
 	case DRM_MODE_CONNECTOR_eDP:
-		if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
+		if (radeon_audio != 0 &&
+		    drm_detect_monitor_audio(radeon_connector_edid(connector)) &&
+		    ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
 			return ATOM_ENCODER_MODE_DP_AUDIO;
 		return ATOM_ENCODER_MODE_DP;
 	case DRM_MODE_CONNECTOR_DVIA:
@@ -1720,8 +1724,10 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
 	}
 
 	encoder_mode = atombios_get_encoder_mode(encoder);
-	if (radeon_audio != 0 &&
-	    (encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode)))
+	if (connector && (radeon_audio != 0) &&
+	    ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
+	     (ENCODER_MODE_IS_DP(encoder_mode) &&
+	      drm_detect_monitor_audio(radeon_connector_edid(connector)))))
 		radeon_audio_dpms(encoder, mode);
 }
 
@@ -2136,6 +2142,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 	int encoder_mode;
 
 	radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -2164,8 +2171,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
 		/* handled in dpms */
 		encoder_mode = atombios_get_encoder_mode(encoder);
-		if (radeon_audio != 0 &&
-		    (encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode)))
+		if (connector && (radeon_audio != 0) &&
+		    ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
+		     (ENCODER_MODE_IS_DP(encoder_mode) &&
+		      drm_detect_monitor_audio(radeon_connector_edid(connector)))))
 			radeon_audio_mode_set(encoder, adjusted_mode);
 		break;
 	case ENCODER_OBJECT_ID_INTERNAL_DDI:
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index e6a4ba236c70..0c993da9c8fb 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3613,6 +3613,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
 	}
 
 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+	WREG32(SRBM_INT_CNTL, 0x1);
+	WREG32(SRBM_INT_ACK, 0x1);
 
 	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
 
@@ -7230,6 +7232,8 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
 	WREG32(CP_ME2_PIPE3_INT_CNTL, 0);
 	/* grbm */
 	WREG32(GRBM_INT_CNTL, 0);
+	/* SRBM */
+	WREG32(SRBM_INT_CNTL, 0);
 	/* vline/vblank, etc. */
 	WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -8046,6 +8050,10 @@ restart_ih:
 			break;
 		}
 		break;
+	case 96:
+		DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+		WREG32(SRBM_INT_ACK, 0x1);
+		break;
 	case 124: /* UVD */
 		DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
 		radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 03003f8a6de6..c648e1996dab 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -482,6 +482,10 @@
 #define SOFT_RESET_ORB				(1 << 23)
 #define SOFT_RESET_VCE				(1 << 24)
 
+#define SRBM_READ_ERROR				0xE98
+#define SRBM_INT_CNTL				0xEA0
+#define SRBM_INT_ACK				0xEA8
+
 #define VM_L2_CNTL				0x1400
 #define ENABLE_L2_CACHE				(1 << 0)
 #define ENABLE_L2_FRAGMENT_PROCESSING		(1 << 1)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 78600f534c80..4c0e24b3bb90 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3253,6 +3253,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 	}
 
 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+	WREG32(SRBM_INT_CNTL, 0x1);
+	WREG32(SRBM_INT_ACK, 0x1);
 
 	evergreen_fix_pci_max_read_req_size(rdev);
 
@@ -4324,6 +4326,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
 	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
 	WREG32(DMA_CNTL, tmp);
 	WREG32(GRBM_INT_CNTL, 0);
+	WREG32(SRBM_INT_CNTL, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
 	if (rdev->num_crtc >= 4) {
@@ -5066,6 +5069,10 @@ restart_ih:
 			DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
 			break;
 		}
+	case 96:
+		DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+		WREG32(SRBM_INT_ACK, 0x1);
+		break;
 	case 124: /* UVD */
 		DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
 		radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index ee83d2a88750..a8d1d5240fcb 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1191,6 +1191,10 @@
 #define SOFT_RESET_REGBB			(1 << 22)
 #define SOFT_RESET_ORB				(1 << 23)
 
+#define SRBM_READ_ERROR				0xE98
+#define SRBM_INT_CNTL				0xEA0
+#define SRBM_INT_ACK				0xEA8
+
 /* display watermarks */
 #define DC_LB_MEMORY_SPLIT			0x6b0c
 #define PRIORITY_A_CNT				0x6b18
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 24242a7f0ac3..dab00812abaa 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -962,6 +962,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 	}
 
 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+	WREG32(SRBM_INT_CNTL, 0x1);
+	WREG32(SRBM_INT_ACK, 0x1);
 
 	evergreen_fix_pci_max_read_req_size(rdev);
 
@@ -1086,12 +1088,12 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 
 	if ((rdev->config.cayman.max_backends_per_se == 1) &&
 	    (rdev->flags & RADEON_IS_IGP)) {
-		if ((disabled_rb_mask & 3) == 1) {
-			/* RB0 disabled, RB1 enabled */
-			tmp = 0x11111111;
-		} else {
+		if ((disabled_rb_mask & 3) == 2) {
 			/* RB1 disabled, RB0 enabled */
 			tmp = 0x00000000;
+		} else {
+			/* RB0 disabled, RB1 enabled */
+			tmp = 0x11111111;
 		}
 	} else {
 		tmp = gb_addr_config & NUM_PIPES_MASK;
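The corrected test keys off which bit of disabled_rb_mask is set: bit 0 marks RB0 disabled, bit 1 marks RB1 disabled, so a low-two-bit value of 2 is the "RB1 disabled, RB0 enabled" case the old code had labeled backwards. A small sketch that tabulates the decoding (illustrative, not driver code):

#include <stdio.h>

int main(void)
{
	/* walk all values of the low two bits of a disabled-RB mask */
	for (unsigned mask = 0; mask < 4; mask++) {
		unsigned low2 = mask & 3;

		printf("mask %u: RB0 %s, RB1 %s\n", low2,
		       (low2 & 1) ? "disabled" : "enabled",
		       (low2 & 2) ? "disabled" : "enabled");
	}
	return 0;	/* mask 2 prints: RB0 enabled, RB1 disabled */
}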
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index ad7125486894..6b44580440d0 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -82,6 +82,10 @@
 #define SOFT_RESET_REGBB			(1 << 22)
 #define SOFT_RESET_ORB				(1 << 23)
 
+#define SRBM_READ_ERROR				0xE98
+#define SRBM_INT_CNTL				0xEA0
+#define SRBM_INT_ACK				0xEA8
+
 #define SRBM_STATUS2				0x0EC4
 #define		DMA_BUSY			(1 << 5)
 #define		DMA1_BUSY			(1 << 6)
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 843b65f46ece..fa2154493cf1 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -188,7 +188,7 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		radeon_crtc = to_radeon_crtc(crtc);
 		if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
-			vrefresh = radeon_crtc->hw_mode.vrefresh;
+			vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
 			break;
 		}
 	}
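drm_mode_vrefresh() derives the refresh rate from the mode's pixel clock and total timings rather than trusting the cached vrefresh field, which can be zero for modes built from raw timings. A simplified stand-alone model of that computation (the real helper also accounts for interlace, doublescan and vscan; this sketch ignores those):

#include <stdio.h>

/* clock is in kHz, as in struct drm_display_mode */
static int mode_vrefresh(int clock_khz, int htotal, int vtotal)
{
	/* pixels per second divided by pixels per frame, rounded */
	return (clock_khz * 1000 + htotal * vtotal / 2) / (htotal * vtotal);
}

int main(void)
{
	/* 1920x1080@60: 148.5 MHz pixel clock, 2200x1125 total raster */
	printf("%d Hz\n", mode_vrefresh(148500, 2200, 1125));	/* prints 60 */
	return 0;
}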
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index c830863bc98a..a579ed379f20 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -715,6 +715,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
 	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
 	struct radeon_device *rdev = p->rdev;
 	uint32_t header;
+	int ret = 0, i;
 
 	if (idx >= ib_chunk->length_dw) {
 		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
@@ -743,14 +744,25 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
 		break;
 	default:
 		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto dump_ib;
 	}
 	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
 		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
 			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto dump_ib;
 	}
 	return 0;
+
+dump_ib:
+	for (i = 0; i < ib_chunk->length_dw; i++) {
+		if (i == idx)
+			printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
+		else
+			printk("\t0x%08x\n", radeon_get_ib_value(p, i));
+	}
+	return ret;
 }
 
 /**
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 6b670b0bc47b..3a297037cc17 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -179,9 +179,12 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
 		    (rdev->pdev->subsystem_vendor == 0x1734) &&
 		    (rdev->pdev->subsystem_device == 0x1107))
 			use_bl = false;
+/* Older PPC macs use on-GPU backlight controller */
+#ifndef CONFIG_PPC_PMAC
 		/* disable native backlight control on older asics */
 		else if (rdev->family < CHIP_R600)
 			use_bl = false;
+#endif
 		else
 			use_bl = true;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 9f758d39420d..33cf4108386d 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -852,6 +852,12 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
 			single_display = false;
 	}
 
+	/* 120hz tends to be problematic even if they are under the
+	 * vblank limit.
+	 */
+	if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
+		single_display = false;
+
 	/* certain older asics have a separare 3D performance state,
 	 * so try that first if the user selected performance
 	 */
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 73107fe9e46f..bcf516a8a2f1 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3162,6 +3162,8 @@ static void si_gpu_init(struct radeon_device *rdev)
 	}
 
 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+	WREG32(SRBM_INT_CNTL, 1);
+	WREG32(SRBM_INT_ACK, 1);
 
 	evergreen_fix_pci_max_read_req_size(rdev);
 
@@ -4699,12 +4701,6 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
 		switch (pkt.type) {
 		case RADEON_PACKET_TYPE0:
 			dev_err(rdev->dev, "Packet0 not allowed!\n");
-			for (i = 0; i < ib->length_dw; i++) {
-				if (i == idx)
-					printk("\t0x%08x <---\n", ib->ptr[i]);
-				else
-					printk("\t0x%08x\n", ib->ptr[i]);
-			}
 			ret = -EINVAL;
 			break;
 		case RADEON_PACKET_TYPE2:
@@ -4736,8 +4732,15 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
 			ret = -EINVAL;
 			break;
 		}
-		if (ret)
+		if (ret) {
+			for (i = 0; i < ib->length_dw; i++) {
+				if (i == idx)
+					printk("\t0x%08x <---\n", ib->ptr[i]);
+				else
+					printk("\t0x%08x\n", ib->ptr[i]);
+			}
 			break;
+		}
 	} while (idx < ib->length_dw);
 
 	return ret;
@@ -5910,6 +5913,7 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
 	tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
 	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
 	WREG32(GRBM_INT_CNTL, 0);
+	WREG32(SRBM_INT_CNTL, 0);
 	if (rdev->num_crtc >= 2) {
 		WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 		WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -6609,6 +6613,10 @@ restart_ih:
 			break;
 		}
 		break;
+	case 96:
+		DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+		WREG32(SRBM_INT_ACK, 0x1);
+		break;
 	case 124: /* UVD */
 		DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
 		radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index cbd91d226f3c..c27118cab16a 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -358,6 +358,10 @@
 #define	CC_SYS_RB_BACKEND_DISABLE		0xe80
 #define	GC_USER_SYS_RB_BACKEND_DISABLE		0xe84
 
+#define SRBM_READ_ERROR				0xE98
+#define SRBM_INT_CNTL				0xEA0
+#define SRBM_INT_ACK				0xEA8
+
 #define	SRBM_STATUS2				0x0EC4
 #define		DMA_BUSY			(1 << 5)
 #define		DMA1_BUSY			(1 << 6)
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 3aaa84ae2681..1a52522f5da7 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -997,8 +997,10 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
 	crtc->state = NULL;
 
 	state = kzalloc(sizeof(*state), GFP_KERNEL);
-	if (state)
+	if (state) {
 		crtc->state = &state->base;
+		crtc->state->crtc = crtc;
+	}
 }
 
 static struct drm_crtc_state *
@@ -1012,6 +1014,7 @@ tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
 		return NULL;
 
 	copy->base.mode_changed = false;
+	copy->base.active_changed = false;
 	copy->base.planes_changed = false;
 	copy->base.event = NULL;
 
@@ -1227,9 +1230,6 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
 	/* program display mode */
 	tegra_dc_set_timings(dc, mode);
 
-	if (dc->soc->supports_border_color)
-		tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
-
 	/* interlacing isn't supported yet, so disable it */
 	if (dc->soc->supports_interlacing) {
 		value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL);
@@ -1252,42 +1252,7 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
 
 static void tegra_crtc_prepare(struct drm_crtc *crtc)
 {
-	struct tegra_dc *dc = to_tegra_dc(crtc);
-	unsigned int syncpt;
-	unsigned long value;
-
 	drm_crtc_vblank_off(crtc);
-
-	if (dc->pipe)
-		syncpt = SYNCPT_VBLANK1;
-	else
-		syncpt = SYNCPT_VBLANK0;
-
-	/* initialize display controller */
-	tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
-	tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
-
-	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
-	tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
-
-	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
-		WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
-	tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
-
-	/* initialize timer */
-	value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
-		WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
-	tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
-
-	value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
-		WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
-	tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
-
-	value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
-	tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
-
-	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
-	tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
 }
 
 static void tegra_crtc_commit(struct drm_crtc *crtc)
@@ -1664,6 +1629,8 @@ static int tegra_dc_init(struct host1x_client *client)
 	struct tegra_drm *tegra = drm->dev_private;
 	struct drm_plane *primary = NULL;
 	struct drm_plane *cursor = NULL;
+	unsigned int syncpt;
+	u32 value;
 	int err;
 
 	if (tegra->domain) {
@@ -1730,6 +1697,40 @@ static int tegra_dc_init(struct host1x_client *client)
 		goto cleanup;
 	}
 
+	/* initialize display controller */
+	if (dc->pipe)
+		syncpt = SYNCPT_VBLANK1;
+	else
+		syncpt = SYNCPT_VBLANK0;
+
+	tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+	tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
+
+	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
+	tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
+
+	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+		WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+	tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
+
+	/* initialize timer */
+	value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
+		WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
+	tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
+
+	value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
+		WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
+	tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+	value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+	tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+	tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+	if (dc->soc->supports_border_color)
+		tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
+
 	return 0;
 
 cleanup:
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 7e06657ae58b..7eaaee74a039 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -851,6 +851,14 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
 	h_back_porch = mode->htotal - mode->hsync_end;
 	h_front_porch = mode->hsync_start - mode->hdisplay;
 
+	err = clk_set_rate(hdmi->clk, pclk);
+	if (err < 0) {
+		dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n",
+			err);
+	}
+
+	DRM_DEBUG_KMS("HDMI clock rate: %lu Hz\n", clk_get_rate(hdmi->clk));
+
 	/* power up sequence */
 	value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0);
 	value &= ~SOR_PLL_PDBG;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index db4fb6e1cc5b..7c669c328c4c 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1872,6 +1872,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
@@ -1926,6 +1927,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
 #endif
 #if IS_ENABLED(CONFIG_HID_SAITEK)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 46edb4d3ed28..204312bfab2c 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -654,6 +654,7 @@
 #define USB_DEVICE_ID_MS_LK6K		0x00f9
 #define USB_DEVICE_ID_MS_PRESENTER_8K_BT	0x0701
 #define USB_DEVICE_ID_MS_PRESENTER_8K_USB	0x0713
+#define USB_DEVICE_ID_MS_NE7K		0x071d
 #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K	0x0730
 #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500	0x076c
 #define USB_DEVICE_ID_MS_SURFACE_PRO_2	0x0799
@@ -802,6 +803,7 @@
 #define USB_VENDOR_ID_SAITEK		0x06a3
 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD	0xff17
 #define USB_DEVICE_ID_SAITEK_PS1000	0x0621
+#define USB_DEVICE_ID_SAITEK_RAT7_OLD	0x0ccb
 #define USB_DEVICE_ID_SAITEK_RAT7	0x0cd7
 #define USB_DEVICE_ID_SAITEK_MMO7	0x0cd0
 
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index fbaea6eb882e..af935eb198c9 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -264,6 +264,8 @@ static const struct hid_device_id ms_devices[] = {
 		.driver_data = MS_ERGONOMY },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP),
 		.driver_data = MS_ERGONOMY },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K),
+		.driver_data = MS_ERGONOMY },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K),
 		.driver_data = MS_ERGONOMY | MS_RDESC },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB),
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index 5632c54eadf0..a014f21275d8 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -177,6 +177,8 @@ static int saitek_event(struct hid_device *hdev, struct hid_field *field,
 static const struct hid_device_id saitek_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000),
 		.driver_data = SAITEK_FIX_PS1000 },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD),
+		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
 		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 6a58b6c723aa..e54ce1097e2c 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -135,8 +135,9 @@ static struct hid_sensor_hub_callbacks *sensor_hub_get_callback(
 {
 	struct hid_sensor_hub_callbacks_list *callback;
 	struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
+	unsigned long flags;
 
-	spin_lock(&pdata->dyn_callback_lock);
+	spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
 	list_for_each_entry(callback, &pdata->dyn_callback_list, list)
 		if (callback->usage_id == usage_id &&
 		    (collection_index >=
@@ -145,10 +146,11 @@ static struct hid_sensor_hub_callbacks *sensor_hub_get_callback(
 		     callback->hsdev->end_collection_index)) {
 			*priv = callback->priv;
 			*hsdev = callback->hsdev;
-			spin_unlock(&pdata->dyn_callback_lock);
+			spin_unlock_irqrestore(&pdata->dyn_callback_lock,
+					       flags);
 			return callback->usage_callback;
 		}
-	spin_unlock(&pdata->dyn_callback_lock);
+	spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
 
 	return NULL;
 }
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 31e9d2561106..1896c019e302 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -804,7 +804,7 @@ union sixaxis_output_report_01 {
 #define DS4_REPORT_0x81_SIZE 7
 #define SIXAXIS_REPORT_0xF2_SIZE 18
 
-static spinlock_t sony_dev_list_lock;
+static DEFINE_SPINLOCK(sony_dev_list_lock);
 static LIST_HEAD(sony_device_list);
 static DEFINE_IDA(sony_device_id_allocator);
 
@@ -1944,6 +1944,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
 		return -ENOMEM;
 	}
 
+	spin_lock_init(&sc->lock);
+
 	sc->quirks = quirks;
 	hid_set_drvdata(hdev, sc);
 	sc->hdev = hdev;
@@ -2147,8 +2149,8 @@ static void __exit sony_exit(void)
 {
 	dbg_hid("Sony:%s\n", __func__);
 
-	ida_destroy(&sony_device_id_allocator);
 	hid_unregister_driver(&sony_driver);
+	ida_destroy(&sony_device_id_allocator);
 }
 module_init(sony_init);
 module_exit(sony_exit);
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index d43e967e7533..36053f33d6d9 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -370,7 +370,10 @@ static int i2c_hid_hwreset(struct i2c_client *client)
 static void i2c_hid_get_input(struct i2c_hid *ihid)
 {
 	int ret, ret_size;
-	int size = ihid->bufsize;
+	int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
+
+	if (size > ihid->bufsize)
+		size = ihid->bufsize;
 
 	ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
 	if (ret != size) {
@@ -785,7 +788,7 @@ static int i2c_hid_init_irq(struct i2c_client *client)
 	dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
 
 	ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
-			IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+			IRQF_TRIGGER_LOW | IRQF_ONESHOT,
 			client->name, ihid);
 	if (ret < 0) {
 		dev_warn(&client->dev,
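The i2c-hid change reads the input length the device advertised in its HID descriptor (stored little-endian on the wire) and clamps it to the allocated buffer, so a misbehaving device can no longer trigger an oversized read. A user-space sketch of the same clamp, with glibc's le16toh() standing in for le16_to_cpu() (the values are illustrative):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t wMaxInputLength = htole16(64);	/* as stored in the descriptor */
	size_t bufsize = 32;			/* what was actually allocated */

	size_t size = le16toh(wMaxInputLength);	/* device's claimed length */
	if (size > bufsize)
		size = bufsize;			/* never read past the buffer */

	printf("reading %zu bytes\n", size);	/* 32, not 64 */
	return 0;
}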
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 1a6507999a65..046351cf17f3 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -778,6 +778,11 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
 		input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[4]));
 		input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[6]));
 		input_report_abs(input, ABS_Z, be16_to_cpup((__be16 *)&data[8]));
+		if ((data[2] & 0x07) | data[4] | data[5] | data[6] | data[7] | data[8] | data[9]) {
+			input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+		} else {
+			input_report_abs(input, ABS_MISC, 0);
+		}
 	} else if (features->type == CINTIQ_HYBRID) {
 		/*
 		 * Do not send hardware buttons under Android. They
@@ -2725,9 +2730,9 @@ static const struct wacom_features wacom_features_0xF6 =
 	.oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10,
 	.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
 static const struct wacom_features wacom_features_0x32A =
-	{ "Wacom Cintiq 27QHD", 119740, 67520, 2047,
-	  63, WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
-	  WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+	{ "Wacom Cintiq 27QHD", 119740, 67520, 2047, 63,
+	  WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0x32B =
 	{ "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63,
 	  WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index d931cbbed240..110fade9cb74 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1606,7 +1606,7 @@ config SENSORS_W83795
 	  will be called w83795.
 
 config SENSORS_W83795_FANCTRL
-	boolean "Include automatic fan control support (DANGEROUS)"
+	bool "Include automatic fan control support (DANGEROUS)"
 	depends on SENSORS_W83795
 	default n
 	help
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index bce4e9ff21bf..6c99ee7bafa3 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -147,6 +147,9 @@ static int ads7828_probe(struct i2c_client *client,
 						       &ads2830_regmap_config);
 	}
 
+	if (IS_ERR(data->regmap))
+		return PTR_ERR(data->regmap);
+
 	data->cmd_byte = ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3;
 	if (!diff_input)
 		data->cmd_byte |= ADS7828_CMD_SD_SE;
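The added check leans on the kernel's error-pointer convention: the regmap init call returns either a valid pointer or a small negative errno encoded in the pointer value, which IS_ERR()/PTR_ERR() unpack. A self-contained sketch of that convention (a simplified mirror of the kernel macros, not the kernel code itself):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095	/* errnos live in the last page of the address space */

static inline void *err_ptr(long err)       { return (void *)err; }
static inline long  ptr_err(const void *p)  { return (long)p; }
static inline int   is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* stand-in for a constructor that may fail, like devm_regmap_init_i2c() */
static void *make_regmap(int fail)
{
	static int dummy;
	return fail ? err_ptr(-ENOMEM) : &dummy;
}

int main(void)
{
	void *regmap = make_regmap(1);

	if (is_err(regmap)) {	/* the check ads7828_probe() gained */
		printf("probe fails with %ld\n", ptr_err(regmap));
		return 1;
	}
	return 0;
}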
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index a674cd83a4e2..9f7dbd189c97 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -57,7 +57,7 @@ config SENSORS_LTC2978
 	  be called ltc2978.
 
 config SENSORS_LTC2978_REGULATOR
-	boolean "Regulator support for LTC2978 and compatibles"
+	bool "Regulator support for LTC2978 and compatibles"
 	depends on SENSORS_LTC2978 && REGULATOR
 	help
 	  If you say yes here you get regulator support for Linear
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 8c9e619f3026..78fbee463628 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -35,11 +35,11 @@ config ACPI_I2C_OPREGION
 if I2C
 
 config I2C_BOARDINFO
-	boolean
+	bool
 	default y
 
 config I2C_COMPAT
-	boolean "Enable compatibility bits for old user-space"
+	bool "Enable compatibility bits for old user-space"
 	default y
 	help
 	  Say Y here if you intend to run lm-sensors 3.1.1 or older, or any
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index ab838d9e28b6..22da9c2ffa22 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -79,7 +79,7 @@ config I2C_AMD8111
 
 config I2C_HIX5HD2
 	tristate "Hix5hd2 high-speed I2C driver"
-	depends on ARCH_HIX5HD2
+	depends on ARCH_HIX5HD2 || COMPILE_TEST
 	help
 	  Say Y here to include support for high-speed I2C controller in the
 	  Hisilicon based hix5hd2 SoCs.
@@ -372,6 +372,16 @@ config I2C_BCM2835
 	  This support is also available as a module. If so, the module
 	  will be called i2c-bcm2835.
 
+config I2C_BCM_IPROC
+	tristate "Broadcom iProc I2C controller"
+	depends on ARCH_BCM_IPROC || COMPILE_TEST
+	default ARCH_BCM_IPROC
+	help
+	  If you say yes to this option, support will be included for the
+	  Broadcom iProc I2C controller.
+
+	  If you don't know what to do here, say N.
+
 config I2C_BCM_KONA
 	tristate "BCM Kona I2C adapter"
 	depends on ARCH_BCM_MOBILE
@@ -465,6 +475,16 @@ config I2C_DESIGNWARE_PCI
 	  This driver can also be built as a module. If so, the module
 	  will be called i2c-designware-pci.
 
+config I2C_DESIGNWARE_BAYTRAIL
+	bool "Intel Baytrail I2C semaphore support"
+	depends on I2C_DESIGNWARE_PLATFORM && IOSF_MBI=y && ACPI
+	help
+	  This driver enables managed host access to the PMIC I2C bus on select
+	  Intel BayTrail platforms using the X-Powers AXP288 PMIC. It allows
+	  the host to request uninterrupted access to the PMIC's I2C bus from
+	  the platform firmware controlling it. You should say Y if running on
+	  a BayTrail system using the AXP288.
+
 config I2C_EFM32
 	tristate "EFM32 I2C controller"
 	depends on ARCH_EFM32 || COMPILE_TEST
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 56388f658d2f..3638feb6677e 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_I2C_AT91) += i2c-at91.o
 obj-$(CONFIG_I2C_AU1550)	+= i2c-au1550.o
 obj-$(CONFIG_I2C_AXXIA)		+= i2c-axxia.o
 obj-$(CONFIG_I2C_BCM2835)	+= i2c-bcm2835.o
+obj-$(CONFIG_I2C_BCM_IPROC)	+= i2c-bcm-iproc.o
 obj-$(CONFIG_I2C_BLACKFIN_TWI)	+= i2c-bfin-twi.o
 obj-$(CONFIG_I2C_CADENCE)	+= i2c-cadence.o
 obj-$(CONFIG_I2C_CBUS_GPIO)	+= i2c-cbus-gpio.o
@@ -41,6 +42,7 @@ obj-$(CONFIG_I2C_DAVINCI)	+= i2c-davinci.o
 obj-$(CONFIG_I2C_DESIGNWARE_CORE)	+= i2c-designware-core.o
 obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM)	+= i2c-designware-platform.o
 i2c-designware-platform-objs := i2c-designware-platdrv.o
+i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-baytrail.o
 obj-$(CONFIG_I2C_DESIGNWARE_PCI)	+= i2c-designware-pci.o
 i2c-designware-pci-objs := i2c-designware-pcidrv.o
 obj-$(CONFIG_I2C_EFM32)		+= i2c-efm32.o
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
new file mode 100644
index 000000000000..d3c89157b337
--- /dev/null
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -0,0 +1,461 @@
1/*
2 * Copyright (C) 2014 Broadcom Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation version 2.
7 *
8 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
9 * kind, whether express or implied; without even the implied warranty
10 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/delay.h>
15#include <linux/i2c.h>
16#include <linux/interrupt.h>
17#include <linux/io.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/platform_device.h>
21#include <linux/slab.h>
22
23#define CFG_OFFSET 0x00
24#define CFG_RESET_SHIFT 31
25#define CFG_EN_SHIFT 30
26#define CFG_M_RETRY_CNT_SHIFT 16
27#define CFG_M_RETRY_CNT_MASK 0x0f
28
29#define TIM_CFG_OFFSET 0x04
30#define TIM_CFG_MODE_400_SHIFT 31
31
32#define M_FIFO_CTRL_OFFSET 0x0c
33#define M_FIFO_RX_FLUSH_SHIFT 31
34#define M_FIFO_TX_FLUSH_SHIFT 30
35#define M_FIFO_RX_CNT_SHIFT 16
36#define M_FIFO_RX_CNT_MASK 0x7f
37#define M_FIFO_RX_THLD_SHIFT 8
38#define M_FIFO_RX_THLD_MASK 0x3f
39
40#define M_CMD_OFFSET 0x30
41#define M_CMD_START_BUSY_SHIFT 31
42#define M_CMD_STATUS_SHIFT 25
43#define M_CMD_STATUS_MASK 0x07
44#define M_CMD_STATUS_SUCCESS 0x0
45#define M_CMD_STATUS_LOST_ARB 0x1
46#define M_CMD_STATUS_NACK_ADDR 0x2
47#define M_CMD_STATUS_NACK_DATA 0x3
48#define M_CMD_STATUS_TIMEOUT 0x4
49#define M_CMD_PROTOCOL_SHIFT 9
50#define M_CMD_PROTOCOL_MASK 0xf
51#define M_CMD_PROTOCOL_BLK_WR 0x7
52#define M_CMD_PROTOCOL_BLK_RD 0x8
53#define M_CMD_PEC_SHIFT 8
54#define M_CMD_RD_CNT_SHIFT 0
55#define M_CMD_RD_CNT_MASK 0xff
56
57#define IE_OFFSET 0x38
58#define IE_M_RX_FIFO_FULL_SHIFT 31
59#define IE_M_RX_THLD_SHIFT 30
60#define IE_M_START_BUSY_SHIFT 28
61
62#define IS_OFFSET 0x3c
63#define IS_M_RX_FIFO_FULL_SHIFT 31
64#define IS_M_RX_THLD_SHIFT 30
65#define IS_M_START_BUSY_SHIFT 28
66
67#define M_TX_OFFSET 0x40
68#define M_TX_WR_STATUS_SHIFT 31
69#define M_TX_DATA_SHIFT 0
70#define M_TX_DATA_MASK 0xff
71
72#define M_RX_OFFSET 0x44
73#define M_RX_STATUS_SHIFT 30
74#define M_RX_STATUS_MASK 0x03
75#define M_RX_PEC_ERR_SHIFT 29
76#define M_RX_DATA_SHIFT 0
77#define M_RX_DATA_MASK 0xff
78
79#define I2C_TIMEOUT_MESC 100
80#define M_TX_RX_FIFO_SIZE 64
81
82enum bus_speed_index {
83 I2C_SPD_100K = 0,
84 I2C_SPD_400K,
85};
86
87struct bcm_iproc_i2c_dev {
88 struct device *device;
89 int irq;
90
91 void __iomem *base;
92
93 struct i2c_adapter adapter;
94
95 struct completion done;
96 int xfer_is_done;
97};
98
99/*
100 * Can be expanded in the future if more interrupt status bits are utilized
101 */
102#define ISR_MASK (1 << IS_M_START_BUSY_SHIFT)
103
104static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
105{
106 struct bcm_iproc_i2c_dev *iproc_i2c = data;
107 u32 status = readl(iproc_i2c->base + IS_OFFSET);
108
109 status &= ISR_MASK;
110
111 if (!status)
112 return IRQ_NONE;
113
114 writel(status, iproc_i2c->base + IS_OFFSET);
115 iproc_i2c->xfer_is_done = 1;
116 complete_all(&iproc_i2c->done);
117
118 return IRQ_HANDLED;
119}
120
121static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
122 struct i2c_msg *msg)
123{
124 u32 val;
125
126 val = readl(iproc_i2c->base + M_CMD_OFFSET);
127 val = (val >> M_CMD_STATUS_SHIFT) & M_CMD_STATUS_MASK;
128
129 switch (val) {
130 case M_CMD_STATUS_SUCCESS:
131 return 0;
132
133 case M_CMD_STATUS_LOST_ARB:
134 dev_dbg(iproc_i2c->device, "lost bus arbitration\n");
135 return -EAGAIN;
136
137 case M_CMD_STATUS_NACK_ADDR:
138 dev_dbg(iproc_i2c->device, "NAK addr:0x%02x\n", msg->addr);
139 return -ENXIO;
140
141 case M_CMD_STATUS_NACK_DATA:
142 dev_dbg(iproc_i2c->device, "NAK data\n");
143 return -ENXIO;
144
145 case M_CMD_STATUS_TIMEOUT:
146 dev_dbg(iproc_i2c->device, "bus timeout\n");
147 return -ETIMEDOUT;
148
149 default:
150 dev_dbg(iproc_i2c->device, "unknown error code=%d\n", val);
151 return -EIO;
152 }
153}
154
155static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
156 struct i2c_msg *msg)
157{
158 int ret, i;
159 u8 addr;
160 u32 val;
161 unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT_MESC);
162
163 /* need to reserve one byte in the FIFO for the slave address */
164 if (msg->len > M_TX_RX_FIFO_SIZE - 1) {
165 dev_err(iproc_i2c->device,
166 "only support data length up to %u bytes\n",
167 M_TX_RX_FIFO_SIZE - 1);
168 return -EOPNOTSUPP;
169 }
170
171 /* check if bus is busy */
172 if (!!(readl(iproc_i2c->base + M_CMD_OFFSET) &
173 BIT(M_CMD_START_BUSY_SHIFT))) {
174 dev_warn(iproc_i2c->device, "bus is busy\n");
175 return -EBUSY;
176 }
177
178 /* format and load slave address into the TX FIFO */
179 addr = msg->addr << 1 | (msg->flags & I2C_M_RD ? 1 : 0);
180 writel(addr, iproc_i2c->base + M_TX_OFFSET);
181
182 /* for a write transaction, load data into the TX FIFO */
183 if (!(msg->flags & I2C_M_RD)) {
184 for (i = 0; i < msg->len; i++) {
185 val = msg->buf[i];
186
187 /* mark the last byte */
188 if (i == msg->len - 1)
189 val |= 1 << M_TX_WR_STATUS_SHIFT;
190
191 writel(val, iproc_i2c->base + M_TX_OFFSET);
192 }
193 }
194
195 /* mark as incomplete before starting the transaction */
196 reinit_completion(&iproc_i2c->done);
197 iproc_i2c->xfer_is_done = 0;
198
199 /*
200 * Enable the "start busy" interrupt, which will be triggered after the
201 * transaction is done, i.e., the internal start_busy bit, transitions
202 * from 1 to 0.
203 */
204 writel(1 << IE_M_START_BUSY_SHIFT, iproc_i2c->base + IE_OFFSET);
205
206 /*
207 * Now we can activate the transfer. For a read operation, specify the
208 * number of bytes to read
209 */
210 val = 1 << M_CMD_START_BUSY_SHIFT;
211 if (msg->flags & I2C_M_RD) {
212 val |= (M_CMD_PROTOCOL_BLK_RD << M_CMD_PROTOCOL_SHIFT) |
213 (msg->len << M_CMD_RD_CNT_SHIFT);
214 } else {
215 val |= (M_CMD_PROTOCOL_BLK_WR << M_CMD_PROTOCOL_SHIFT);
216 }
217 writel(val, iproc_i2c->base + M_CMD_OFFSET);
218
219 time_left = wait_for_completion_timeout(&iproc_i2c->done, time_left);
220
221 /* disable all interrupts */
222 writel(0, iproc_i2c->base + IE_OFFSET);
223 /* read it back to flush the write */
224 readl(iproc_i2c->base + IE_OFFSET);
225
226 /* make sure the interrupt handler isn't running */
227 synchronize_irq(iproc_i2c->irq);
228
229 if (!time_left && !iproc_i2c->xfer_is_done) {
230 dev_err(iproc_i2c->device, "transaction timed out\n");
231
232 /* flush FIFOs */
233 val = (1 << M_FIFO_RX_FLUSH_SHIFT) |
234 (1 << M_FIFO_TX_FLUSH_SHIFT);
235 writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
236 return -ETIMEDOUT;
237 }
238
239 ret = bcm_iproc_i2c_check_status(iproc_i2c, msg);
240 if (ret) {
241 /* flush both TX/RX FIFOs */
242 val = (1 << M_FIFO_RX_FLUSH_SHIFT) |
243 (1 << M_FIFO_TX_FLUSH_SHIFT);
244 writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
245 return ret;
246 }
247
248 /*
249 * For a read operation, we now need to load the data from FIFO
250 * into the memory buffer
251 */
252 if (msg->flags & I2C_M_RD) {
253 for (i = 0; i < msg->len; i++) {
254 msg->buf[i] = (readl(iproc_i2c->base + M_RX_OFFSET) >>
255 M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
256 }
257 }
258
259 return 0;
260}
261
262static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
263 struct i2c_msg msgs[], int num)
264{
265 struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(adapter);
266 int ret, i;
267
268 /* go through all messages */
269 for (i = 0; i < num; i++) {
270 ret = bcm_iproc_i2c_xfer_single_msg(iproc_i2c, &msgs[i]);
271 if (ret) {
272 dev_dbg(iproc_i2c->device, "xfer failed\n");
273 return ret;
274 }
275 }
276
277 return num;
278}
279
280static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
281{
282 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
283}
284
285static const struct i2c_algorithm bcm_iproc_algo = {
286 .master_xfer = bcm_iproc_i2c_xfer,
287 .functionality = bcm_iproc_i2c_functionality,
288};
289
290static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
291{
292 unsigned int bus_speed;
293 u32 val;
294 int ret = of_property_read_u32(iproc_i2c->device->of_node,
295 "clock-frequency", &bus_speed);
296 if (ret < 0) {
297 dev_info(iproc_i2c->device,
298 "unable to interpret clock-frequency DT property\n");
299 bus_speed = 100000;
300 }
301
302 if (bus_speed < 100000) {
303 dev_err(iproc_i2c->device, "%d Hz bus speed not supported\n",
304 bus_speed);
305 dev_err(iproc_i2c->device,
306 "valid speeds are 100khz and 400khz\n");
307 return -EINVAL;
308 } else if (bus_speed < 400000) {
309 bus_speed = 100000;
310 } else {
311 bus_speed = 400000;
312 }
313
314 val = readl(iproc_i2c->base + TIM_CFG_OFFSET);
315 val &= ~(1 << TIM_CFG_MODE_400_SHIFT);
316 val |= (bus_speed == 400000) << TIM_CFG_MODE_400_SHIFT;
317 writel(val, iproc_i2c->base + TIM_CFG_OFFSET);
318
319 dev_info(iproc_i2c->device, "bus set to %u Hz\n", bus_speed);
320
321 return 0;
322}
323
324static int bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
325{
326 u32 val;
327
328 /* put controller in reset */
329 val = readl(iproc_i2c->base + CFG_OFFSET);
330 val |= 1 << CFG_RESET_SHIFT;
331 val &= ~(1 << CFG_EN_SHIFT);
332 writel(val, iproc_i2c->base + CFG_OFFSET);
333
334 /* wait 100 usec per spec */
335 udelay(100);
336
337 /* bring controller out of reset */
338 val &= ~(1 << CFG_RESET_SHIFT);
339 writel(val, iproc_i2c->base + CFG_OFFSET);
340
341 /* flush TX/RX FIFOs and set RX FIFO threshold to zero */
342 val = (1 << M_FIFO_RX_FLUSH_SHIFT) | (1 << M_FIFO_TX_FLUSH_SHIFT);
343 writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
344
345 /* disable all interrupts */
346 writel(0, iproc_i2c->base + IE_OFFSET);
347
348 /* clear all pending interrupts */
349 writel(0xffffffff, iproc_i2c->base + IS_OFFSET);
350
351 return 0;
352}
353
354static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
355 bool enable)
356{
357 u32 val;
358
359 val = readl(iproc_i2c->base + CFG_OFFSET);
360 if (enable)
361 val |= BIT(CFG_EN_SHIFT);
362 else
363 val &= ~BIT(CFG_EN_SHIFT);
364 writel(val, iproc_i2c->base + CFG_OFFSET);
365}
366
367static int bcm_iproc_i2c_probe(struct platform_device *pdev)
368{
369 int irq, ret = 0;
370 struct bcm_iproc_i2c_dev *iproc_i2c;
371 struct i2c_adapter *adap;
372 struct resource *res;
373
374 iproc_i2c = devm_kzalloc(&pdev->dev, sizeof(*iproc_i2c),
375 GFP_KERNEL);
376 if (!iproc_i2c)
377 return -ENOMEM;
378
379 platform_set_drvdata(pdev, iproc_i2c);
380 iproc_i2c->device = &pdev->dev;
381 init_completion(&iproc_i2c->done);
382
383 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
384 iproc_i2c->base = devm_ioremap_resource(iproc_i2c->device, res);
385 if (IS_ERR(iproc_i2c->base))
386 return PTR_ERR(iproc_i2c->base);
387
388 ret = bcm_iproc_i2c_init(iproc_i2c);
389 if (ret)
390 return ret;
391
392 ret = bcm_iproc_i2c_cfg_speed(iproc_i2c);
393 if (ret)
394 return ret;
395
396 irq = platform_get_irq(pdev, 0);
397 if (irq <= 0) {
398 dev_err(iproc_i2c->device, "no irq resource\n");
399 return irq;
400 }
401 iproc_i2c->irq = irq;
402
403 ret = devm_request_irq(iproc_i2c->device, irq, bcm_iproc_i2c_isr, 0,
404 pdev->name, iproc_i2c);
405 if (ret < 0) {
406 dev_err(iproc_i2c->device, "unable to request irq %i\n", irq);
407 return ret;
408 }
409
410 bcm_iproc_i2c_enable_disable(iproc_i2c, true);
411
412 adap = &iproc_i2c->adapter;
413 i2c_set_adapdata(adap, iproc_i2c);
414 strlcpy(adap->name, "Broadcom iProc I2C adapter", sizeof(adap->name));
415 adap->algo = &bcm_iproc_algo;
416 adap->dev.parent = &pdev->dev;
417 adap->dev.of_node = pdev->dev.of_node;
418
419 ret = i2c_add_adapter(adap);
420 if (ret) {
421 dev_err(iproc_i2c->device, "failed to add adapter\n");
422 return ret;
423 }
424
425 return 0;
426}
427
428static int bcm_iproc_i2c_remove(struct platform_device *pdev)
429{
430 struct bcm_iproc_i2c_dev *iproc_i2c = platform_get_drvdata(pdev);
431
432 /* make sure there's no pending interrupt when we remove the adapter */
433 writel(0, iproc_i2c->base + IE_OFFSET);
434 readl(iproc_i2c->base + IE_OFFSET);
435 synchronize_irq(iproc_i2c->irq);
436
437 i2c_del_adapter(&iproc_i2c->adapter);
438 bcm_iproc_i2c_enable_disable(iproc_i2c, false);
439
440 return 0;
441}
442
443static const struct of_device_id bcm_iproc_i2c_of_match[] = {
444 { .compatible = "brcm,iproc-i2c" },
445 { /* sentinel */ }
446};
447MODULE_DEVICE_TABLE(of, bcm_iproc_i2c_of_match);
448
449static struct platform_driver bcm_iproc_i2c_driver = {
450 .driver = {
451 .name = "bcm-iproc-i2c",
452 .of_match_table = bcm_iproc_i2c_of_match,
453 },
454 .probe = bcm_iproc_i2c_probe,
455 .remove = bcm_iproc_i2c_remove,
456};
457module_platform_driver(bcm_iproc_i2c_driver);
458
459MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
460MODULE_DESCRIPTION("Broadcom iProc I2C Driver");
461MODULE_LICENSE("GPL v2");
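
A side note on bcm_iproc_i2c_cfg_speed() above: the driver treats a missing "clock-frequency" DT property as non-fatal and quietly falls back to standard mode. The same optional-property idiom, sketched standalone (np is a hypothetical device node pointer):

    u32 bus_speed;

    /* fall back to 100 kHz standard mode when the property is absent */
    if (of_property_read_u32(np, "clock-frequency", &bus_speed) < 0)
        bus_speed = 100000;
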
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 626f74ecd4be..7d7a14cdadfb 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -128,6 +128,7 @@
128 * @suspended: Flag holding the device's PM status 128 * @suspended: Flag holding the device's PM status
129 * @send_count: Number of bytes still expected to send 129 * @send_count: Number of bytes still expected to send
130 * @recv_count: Number of bytes still expected to receive 130 * @recv_count: Number of bytes still expected to receive
131 * @curr_recv_count: Number of bytes to be received in current transfer
131 * @irq: IRQ number 132 * @irq: IRQ number
132 * @input_clk: Input clock to I2C controller 133 * @input_clk: Input clock to I2C controller
133 * @i2c_clk: Maximum I2C clock speed 134 * @i2c_clk: Maximum I2C clock speed
@@ -146,6 +147,7 @@ struct cdns_i2c {
146 u8 suspended; 147 u8 suspended;
147 unsigned int send_count; 148 unsigned int send_count;
148 unsigned int recv_count; 149 unsigned int recv_count;
150 unsigned int curr_recv_count;
149 int irq; 151 int irq;
150 unsigned long input_clk; 152 unsigned long input_clk;
151 unsigned int i2c_clk; 153 unsigned int i2c_clk;
@@ -182,14 +184,15 @@ static void cdns_i2c_clear_bus_hold(struct cdns_i2c *id)
182 */ 184 */
183static irqreturn_t cdns_i2c_isr(int irq, void *ptr) 185static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
184{ 186{
185 unsigned int isr_status, avail_bytes; 187 unsigned int isr_status, avail_bytes, updatetx;
186 unsigned int bytes_to_recv, bytes_to_send; 188 unsigned int bytes_to_send;
187 struct cdns_i2c *id = ptr; 189 struct cdns_i2c *id = ptr;
188 /* Signal completion only after everything is updated */ 190 /* Signal completion only after everything is updated */
189 int done_flag = 0; 191 int done_flag = 0;
190 irqreturn_t status = IRQ_NONE; 192 irqreturn_t status = IRQ_NONE;
191 193
192 isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET); 194 isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
195 cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
193 196
194 /* Handling nack and arbitration lost interrupt */ 197 /* Handling nack and arbitration lost interrupt */
195 if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_ARB_LOST)) { 198 if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_ARB_LOST)) {
@@ -197,89 +200,112 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
197 status = IRQ_HANDLED; 200 status = IRQ_HANDLED;
198 } 201 }
199 202
200 /* Handling Data interrupt */ 203 /*
201 if ((isr_status & CDNS_I2C_IXR_DATA) && 204 * Check if transfer size register needs to be updated again for a
202 (id->recv_count >= CDNS_I2C_DATA_INTR_DEPTH)) { 205 * large data receive operation.
203 /* Always read data interrupt threshold bytes */ 206 */
204 bytes_to_recv = CDNS_I2C_DATA_INTR_DEPTH; 207 updatetx = 0;
205 id->recv_count -= CDNS_I2C_DATA_INTR_DEPTH; 208 if (id->recv_count > id->curr_recv_count)
206 avail_bytes = cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET); 209 updatetx = 1;
207 210
208 /* 211 /* When receiving, handle data interrupt and completion interrupt */
209 * if the tranfer size register value is zero, then 212 if (id->p_recv_buf &&
210 * check for the remaining bytes and update the 213 ((isr_status & CDNS_I2C_IXR_COMP) ||
211 * transfer size register. 214 (isr_status & CDNS_I2C_IXR_DATA))) {
212 */ 215 /* Read data if receive data valid is set */
213 if (!avail_bytes) { 216 while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
214 if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) 217 CDNS_I2C_SR_RXDV) {
215 cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE, 218 /*
216 CDNS_I2C_XFER_SIZE_OFFSET); 219 * Clear hold bit that was set for FIFO control if
217 else 220 * RX data left is less than FIFO depth, unless
218 cdns_i2c_writereg(id->recv_count, 221 * repeated start is selected.
219 CDNS_I2C_XFER_SIZE_OFFSET); 222 */
220 } 223 if ((id->recv_count < CDNS_I2C_FIFO_DEPTH) &&
224 !id->bus_hold_flag)
225 cdns_i2c_clear_bus_hold(id);
221 226
222 /* Process the data received */
223 while (bytes_to_recv--)
224 *(id->p_recv_buf)++ = 227 *(id->p_recv_buf)++ =
225 cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET); 228 cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
229 id->recv_count--;
230 id->curr_recv_count--;
226 231
227 if (!id->bus_hold_flag && 232 if (updatetx &&
228 (id->recv_count <= CDNS_I2C_FIFO_DEPTH)) 233 (id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1))
229 cdns_i2c_clear_bus_hold(id); 234 break;
235 }
230 236
231 status = IRQ_HANDLED; 237 /*
232 } 238 * The controller sends NACK to the slave when transfer size
239 * register reaches zero without considering the HOLD bit.
240 * This workaround is implemented for large data transfers to
241 * maintain transfer size non-zero while performing a large
242 * receive operation.
243 */
244 if (updatetx &&
245 (id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1)) {
246 /* wait while fifo is full */
247 while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) !=
248 (id->curr_recv_count - CDNS_I2C_FIFO_DEPTH))
249 ;
233 250
234 /* Handling Transfer Complete interrupt */
235 if (isr_status & CDNS_I2C_IXR_COMP) {
236 if (!id->p_recv_buf) {
237 /* 251 /*
238 * If the device is sending data If there is further 252 * Check number of bytes to be received against maximum
239 * data to be sent. Calculate the available space 253 * transfer size and update register accordingly.
240 * in FIFO and fill the FIFO with that many bytes.
241 */ 254 */
242 if (id->send_count) { 255 if (((int)(id->recv_count) - CDNS_I2C_FIFO_DEPTH) >
243 avail_bytes = CDNS_I2C_FIFO_DEPTH - 256 CDNS_I2C_TRANSFER_SIZE) {
244 cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET); 257 cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
245 if (id->send_count > avail_bytes) 258 CDNS_I2C_XFER_SIZE_OFFSET);
246 bytes_to_send = avail_bytes; 259 id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE +
247 else 260 CDNS_I2C_FIFO_DEPTH;
248 bytes_to_send = id->send_count;
249
250 while (bytes_to_send--) {
251 cdns_i2c_writereg(
252 (*(id->p_send_buf)++),
253 CDNS_I2C_DATA_OFFSET);
254 id->send_count--;
255 }
256 } else { 261 } else {
257 /* 262 cdns_i2c_writereg(id->recv_count -
258 * Signal the completion of transaction and 263 CDNS_I2C_FIFO_DEPTH,
259 * clear the hold bus bit if there are no 264 CDNS_I2C_XFER_SIZE_OFFSET);
260 * further messages to be processed. 265 id->curr_recv_count = id->recv_count;
261 */
262 done_flag = 1;
263 } 266 }
264 if (!id->send_count && !id->bus_hold_flag) 267 }
265 cdns_i2c_clear_bus_hold(id); 268
266 } else { 269 /* Clear hold (if not repeated start) and signal completion */
270 if ((isr_status & CDNS_I2C_IXR_COMP) && !id->recv_count) {
267 if (!id->bus_hold_flag) 271 if (!id->bus_hold_flag)
268 cdns_i2c_clear_bus_hold(id); 272 cdns_i2c_clear_bus_hold(id);
273 done_flag = 1;
274 }
275
276 status = IRQ_HANDLED;
277 }
278
279 /* When sending, handle transfer complete interrupt */
280 if ((isr_status & CDNS_I2C_IXR_COMP) && !id->p_recv_buf) {
281 /*
282 * If there is more data to be sent, calculate the
283 * space available in FIFO and fill with that many bytes.
284 */
285 if (id->send_count) {
286 avail_bytes = CDNS_I2C_FIFO_DEPTH -
287 cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
288 if (id->send_count > avail_bytes)
289 bytes_to_send = avail_bytes;
290 else
291 bytes_to_send = id->send_count;
292
293 while (bytes_to_send--) {
294 cdns_i2c_writereg(
295 (*(id->p_send_buf)++),
296 CDNS_I2C_DATA_OFFSET);
297 id->send_count--;
298 }
299 } else {
269 /* 300 /*
270 * If the device is receiving data, then signal 301 * Signal the completion of transaction and
271 * the completion of transaction and read the data 302 * clear the hold bus bit if there are no
272 * present in the FIFO. Signal the completion of 303 * further messages to be processed.
273 * transaction.
274 */ 304 */
275 while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
276 CDNS_I2C_SR_RXDV) {
277 *(id->p_recv_buf)++ =
278 cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
279 id->recv_count--;
280 }
281 done_flag = 1; 305 done_flag = 1;
282 } 306 }
307 if (!id->send_count && !id->bus_hold_flag)
308 cdns_i2c_clear_bus_hold(id);
283 309
284 status = IRQ_HANDLED; 310 status = IRQ_HANDLED;
285 } 311 }
@@ -289,8 +315,6 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
289 if (id->err_status) 315 if (id->err_status)
290 status = IRQ_HANDLED; 316 status = IRQ_HANDLED;
291 317
292 cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
293
294 if (done_flag) 318 if (done_flag)
295 complete(&id->xfer_done); 319 complete(&id->xfer_done);
296 320
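
Note how this rework also relocates the interrupt acknowledgement: the status register is now snapshotted and written back (write-1-to-clear) immediately after the read at the top of the ISR, instead of at the bottom, so events arriving while the handler runs raise a fresh interrupt rather than being wiped out by a late ack. The pattern in isolation, with hypothetical register names:

    void __iomem *base;    /* hypothetical mapped registers */
    u32 status;

    status = readl(base + IRQ_STATUS);    /* snapshot pending events */
    writel(status, base + IRQ_STATUS);    /* ack exactly what we saw (W1C) */
    handle_events(status);                /* act only on the snapshot */
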
@@ -316,6 +340,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
316 if (id->p_msg->flags & I2C_M_RECV_LEN) 340 if (id->p_msg->flags & I2C_M_RECV_LEN)
317 id->recv_count = I2C_SMBUS_BLOCK_MAX + 1; 341 id->recv_count = I2C_SMBUS_BLOCK_MAX + 1;
318 342
343 id->curr_recv_count = id->recv_count;
344
319 /* 345 /*
320 * Check for the message size against FIFO depth and set the 346 * Check for the message size against FIFO depth and set the
321 * 'hold bus' bit if it is greater than FIFO depth. 347 * 'hold bus' bit if it is greater than FIFO depth.
@@ -335,11 +361,14 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
335 * receive if it is less than transfer size and transfer size if 361 * receive if it is less than transfer size and transfer size if
336 * it is more. Enable the interrupts. 362 * it is more. Enable the interrupts.
337 */ 363 */
338 if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) 364 if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) {
339 cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE, 365 cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
340 CDNS_I2C_XFER_SIZE_OFFSET); 366 CDNS_I2C_XFER_SIZE_OFFSET);
341 else 367 id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE;
368 } else {
342 cdns_i2c_writereg(id->recv_count, CDNS_I2C_XFER_SIZE_OFFSET); 369 cdns_i2c_writereg(id->recv_count, CDNS_I2C_XFER_SIZE_OFFSET);
370 }
371
343 /* Clear the bus hold flag if bytes to receive is less than FIFO size */ 372 /* Clear the bus hold flag if bytes to receive is less than FIFO size */
344 if (!id->bus_hold_flag && 373 if (!id->bus_hold_flag &&
345 ((id->p_msg->flags & I2C_M_RECV_LEN) != I2C_M_RECV_LEN) && 374 ((id->p_msg->flags & I2C_M_RECV_LEN) != I2C_M_RECV_LEN) &&
@@ -516,6 +545,20 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
516 * processed with a repeated start. 545 * processed with a repeated start.
517 */ 546 */
518 if (num > 1) { 547 if (num > 1) {
548 /*
549 * This controller does not give completion interrupt after a
550 * master receive message if HOLD bit is set (repeated start),
551 * resulting in SW timeout. Hence, if a receive message is
552 * followed by any other message, an error is returned
553 * indicating that this sequence is not supported.
554 */
555 for (count = 0; count < num - 1; count++) {
556 if (msgs[count].flags & I2C_M_RD) {
557 dev_warn(adap->dev.parent,
558 "Can't do repeated start after a receive message\n");
559 return -EOPNOTSUPP;
560 }
561 }
519 id->bus_hold_flag = 1; 562 id->bus_hold_flag = 1;
520 reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); 563 reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
521 reg |= CDNS_I2C_CR_HOLD; 564 reg |= CDNS_I2C_CR_HOLD;
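
Given the -EOPNOTSUPP check added above, a combined write-then-read transaction (repeated start) cannot be used against this controller; the read has to be issued as its own transaction. A hedged client-side sketch — adap, addr, reg and buf are all hypothetical, and note that the STOP between the two calls does change bus semantics versus a true repeated start:

    u16 addr = 0x50;    /* hypothetical client address */
    u8 reg = 0x00;      /* hypothetical register to read */
    u8 buf[4];
    struct i2c_msg wr = { .addr = addr, .flags = 0, .len = 1, .buf = &reg };
    struct i2c_msg rd = { .addr = addr, .flags = I2C_M_RD,
                          .len = sizeof(buf), .buf = buf };
    int ret;

    ret = i2c_transfer(adap, &wr, 1);    /* STOP is issued here */
    if (ret == 1)
        ret = i2c_transfer(adap, &rd, 1);
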
diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c
new file mode 100644
index 000000000000..5f1ff4cc5c34
--- /dev/null
+++ b/drivers/i2c/busses/i2c-designware-baytrail.c
@@ -0,0 +1,160 @@
1/*
2 * Intel BayTrail PMIC I2C bus semaphore implementation
3 * Copyright (c) 2014, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14#include <linux/module.h>
15#include <linux/delay.h>
16#include <linux/device.h>
17#include <linux/acpi.h>
18#include <linux/i2c.h>
19#include <linux/interrupt.h>
20#include <asm/iosf_mbi.h>
21#include "i2c-designware-core.h"
22
23#define SEMAPHORE_TIMEOUT 100
24#define PUNIT_SEMAPHORE 0x7
25
26static unsigned long acquired;
27
28static int get_sem(struct device *dev, u32 *sem)
29{
30 u32 reg_val;
31 int ret;
32
33 ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, PUNIT_SEMAPHORE,
34 &reg_val);
35 if (ret) {
36 dev_err(dev, "iosf failed to read punit semaphore\n");
37 return ret;
38 }
39
40 *sem = reg_val & 0x1;
41
42 return 0;
43}
44
45static void reset_semaphore(struct device *dev)
46{
47 u32 data;
48
49 if (iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
50 PUNIT_SEMAPHORE, &data)) {
51 dev_err(dev, "iosf failed to reset punit semaphore during read\n");
52 return;
53 }
54
55 data = data & 0xfffffffe;
56 if (iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
57 PUNIT_SEMAPHORE, data))
58 dev_err(dev, "iosf failed to reset punit semaphore during write\n");
59}
60
61int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
62{
63 u32 sem = 0;
64 int ret;
65 unsigned long start, end;
66
67 if (!dev || !dev->dev)
68 return -ENODEV;
69
70 if (!dev->acquire_lock)
71 return 0;
72
73 /* host driver writes 0x2 to side band semaphore register */
74 ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
75 PUNIT_SEMAPHORE, 0x2);
76 if (ret) {
77 dev_err(dev->dev, "iosf punit semaphore request failed\n");
78 return ret;
79 }
80
81 /* host driver waits for bit 0 to be set in semaphore register */
82 start = jiffies;
83 end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT);
84 while (!time_after(jiffies, end)) {
85 ret = get_sem(dev->dev, &sem);
86 if (!ret && sem) {
87 acquired = jiffies;
88 dev_dbg(dev->dev, "punit semaphore acquired after %ums\n",
89 jiffies_to_msecs(jiffies - start));
90 return 0;
91 }
92
93 usleep_range(1000, 2000);
94 }
95
96 dev_err(dev->dev, "punit semaphore timed out, resetting\n");
97 reset_semaphore(dev->dev);
98
99 ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
100 PUNIT_SEMAPHORE, &sem);
101 if (ret)
102 dev_err(dev->dev, "iosf failed to read punit semaphore\n");
103 else
104 dev_err(dev->dev, "PUNIT SEM: %d\n", sem);
105
106 WARN_ON(1);
107
108 return -ETIMEDOUT;
109}
110EXPORT_SYMBOL(baytrail_i2c_acquire);
111
112void baytrail_i2c_release(struct dw_i2c_dev *dev)
113{
114 if (!dev || !dev->dev)
115 return;
116
117 if (!dev->acquire_lock)
118 return;
119
120 reset_semaphore(dev->dev);
121 dev_dbg(dev->dev, "punit semaphore held for %ums\n",
122 jiffies_to_msecs(jiffies - acquired));
123}
124EXPORT_SYMBOL(baytrail_i2c_release);
125
126int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
127{
128 acpi_status status;
129 unsigned long long shared_host = 0;
130 acpi_handle handle;
131
132 if (!dev || !dev->dev)
133 return 0;
134
135 handle = ACPI_HANDLE(dev->dev);
136 if (!handle)
137 return 0;
138
139 status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
140
141 if (ACPI_FAILURE(status))
142 return 0;
143
144 if (shared_host) {
145 dev_info(dev->dev, "I2C bus managed by PUNIT\n");
146 dev->acquire_lock = baytrail_i2c_acquire;
147 dev->release_lock = baytrail_i2c_release;
148 dev->pm_runtime_disabled = true;
149 }
150
151 if (!iosf_mbi_available())
152 return -EPROBE_DEFER;
153
154 return 0;
155}
156EXPORT_SYMBOL(i2c_dw_eval_lock_support);
157
158MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
159MODULE_DESCRIPTION("Baytrail I2C Semaphore driver");
160MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 23628b7bfb8d..6e25c010e690 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -170,10 +170,10 @@ u32 dw_readl(struct dw_i2c_dev *dev, int offset)
170 u32 value; 170 u32 value;
171 171
172 if (dev->accessor_flags & ACCESS_16BIT) 172 if (dev->accessor_flags & ACCESS_16BIT)
173 value = readw(dev->base + offset) | 173 value = readw_relaxed(dev->base + offset) |
174 (readw(dev->base + offset + 2) << 16); 174 (readw_relaxed(dev->base + offset + 2) << 16);
175 else 175 else
176 value = readl(dev->base + offset); 176 value = readl_relaxed(dev->base + offset);
177 177
178 if (dev->accessor_flags & ACCESS_SWAP) 178 if (dev->accessor_flags & ACCESS_SWAP)
179 return swab32(value); 179 return swab32(value);
@@ -187,10 +187,10 @@ void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
187 b = swab32(b); 187 b = swab32(b);
188 188
189 if (dev->accessor_flags & ACCESS_16BIT) { 189 if (dev->accessor_flags & ACCESS_16BIT) {
190 writew((u16)b, dev->base + offset); 190 writew_relaxed((u16)b, dev->base + offset);
191 writew((u16)(b >> 16), dev->base + offset + 2); 191 writew_relaxed((u16)(b >> 16), dev->base + offset + 2);
192 } else { 192 } else {
193 writel(b, dev->base + offset); 193 writel_relaxed(b, dev->base + offset);
194 } 194 }
195} 195}
196 196
@@ -285,6 +285,15 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
285 u32 hcnt, lcnt; 285 u32 hcnt, lcnt;
286 u32 reg; 286 u32 reg;
287 u32 sda_falling_time, scl_falling_time; 287 u32 sda_falling_time, scl_falling_time;
288 int ret;
289
290 if (dev->acquire_lock) {
291 ret = dev->acquire_lock(dev);
292 if (ret) {
293 dev_err(dev->dev, "couldn't acquire bus ownership\n");
294 return ret;
295 }
296 }
288 297
289 input_clock_khz = dev->get_clk_rate_khz(dev); 298 input_clock_khz = dev->get_clk_rate_khz(dev);
290 299
@@ -298,6 +307,8 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
298 } else if (reg != DW_IC_COMP_TYPE_VALUE) { 307 } else if (reg != DW_IC_COMP_TYPE_VALUE) {
299 dev_err(dev->dev, "Unknown Synopsys component type: " 308 dev_err(dev->dev, "Unknown Synopsys component type: "
300 "0x%08x\n", reg); 309 "0x%08x\n", reg);
310 if (dev->release_lock)
311 dev->release_lock(dev);
301 return -ENODEV; 312 return -ENODEV;
302 } 313 }
303 314
@@ -309,40 +320,39 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
309 sda_falling_time = dev->sda_falling_time ?: 300; /* ns */ 320 sda_falling_time = dev->sda_falling_time ?: 300; /* ns */
310 scl_falling_time = dev->scl_falling_time ?: 300; /* ns */ 321 scl_falling_time = dev->scl_falling_time ?: 300; /* ns */
311 322
312 /* Standard-mode */ 323 /* Set SCL timing parameters for standard-mode */
313 hcnt = i2c_dw_scl_hcnt(input_clock_khz,
314 4000, /* tHD;STA = tHIGH = 4.0 us */
315 sda_falling_time,
316 0, /* 0: DW default, 1: Ideal */
317 0); /* No offset */
318 lcnt = i2c_dw_scl_lcnt(input_clock_khz,
319 4700, /* tLOW = 4.7 us */
320 scl_falling_time,
321 0); /* No offset */
322
323 /* Allow platforms to specify the ideal HCNT and LCNT values */
324 if (dev->ss_hcnt && dev->ss_lcnt) { 324 if (dev->ss_hcnt && dev->ss_lcnt) {
325 hcnt = dev->ss_hcnt; 325 hcnt = dev->ss_hcnt;
326 lcnt = dev->ss_lcnt; 326 lcnt = dev->ss_lcnt;
327 } else {
328 hcnt = i2c_dw_scl_hcnt(input_clock_khz,
329 4000, /* tHD;STA = tHIGH = 4.0 us */
330 sda_falling_time,
331 0, /* 0: DW default, 1: Ideal */
332 0); /* No offset */
333 lcnt = i2c_dw_scl_lcnt(input_clock_khz,
334 4700, /* tLOW = 4.7 us */
335 scl_falling_time,
336 0); /* No offset */
327 } 337 }
328 dw_writel(dev, hcnt, DW_IC_SS_SCL_HCNT); 338 dw_writel(dev, hcnt, DW_IC_SS_SCL_HCNT);
329 dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT); 339 dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT);
330 dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt); 340 dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
331 341
332 /* Fast-mode */ 342 /* Set SCL timing parameters for fast-mode */
333 hcnt = i2c_dw_scl_hcnt(input_clock_khz,
334 600, /* tHD;STA = tHIGH = 0.6 us */
335 sda_falling_time,
336 0, /* 0: DW default, 1: Ideal */
337 0); /* No offset */
338 lcnt = i2c_dw_scl_lcnt(input_clock_khz,
339 1300, /* tLOW = 1.3 us */
340 scl_falling_time,
341 0); /* No offset */
342
343 if (dev->fs_hcnt && dev->fs_lcnt) { 343 if (dev->fs_hcnt && dev->fs_lcnt) {
344 hcnt = dev->fs_hcnt; 344 hcnt = dev->fs_hcnt;
345 lcnt = dev->fs_lcnt; 345 lcnt = dev->fs_lcnt;
346 } else {
347 hcnt = i2c_dw_scl_hcnt(input_clock_khz,
348 600, /* tHD;STA = tHIGH = 0.6 us */
349 sda_falling_time,
350 0, /* 0: DW default, 1: Ideal */
351 0); /* No offset */
352 lcnt = i2c_dw_scl_lcnt(input_clock_khz,
353 1300, /* tLOW = 1.3 us */
354 scl_falling_time,
355 0); /* No offset */
346 } 356 }
347 dw_writel(dev, hcnt, DW_IC_FS_SCL_HCNT); 357 dw_writel(dev, hcnt, DW_IC_FS_SCL_HCNT);
348 dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT); 358 dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT);
@@ -364,6 +374,9 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
364 374
365 /* configure the i2c master */ 375 /* configure the i2c master */
366 dw_writel(dev, dev->master_cfg , DW_IC_CON); 376 dw_writel(dev, dev->master_cfg , DW_IC_CON);
377
378 if (dev->release_lock)
379 dev->release_lock(dev);
367 return 0; 380 return 0;
368} 381}
369EXPORT_SYMBOL_GPL(i2c_dw_init); 382EXPORT_SYMBOL_GPL(i2c_dw_init);
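
To make the *CNT values concrete: each count is roughly the number of input-clock cycles needed to hold SCL for the target phase time, plus small fixed corrections for IP-internal latencies. A deliberately simplified back-of-envelope sketch, not the driver's exact i2c_dw_scl_hcnt()/i2c_dw_scl_lcnt() formulas:

    /* cycles needed to hold SCL for t_ns at clk_khz, rounded to nearest */
    static unsigned int approx_cnt(unsigned int clk_khz, unsigned int t_ns)
    {
        return (clk_khz * t_ns + 500000) / 1000000;
    }

    /*
     * e.g. a 100 MHz input clock (clk_khz = 100000):
     *   standard-mode tHIGH = 4000 ns -> ~400 cycles
     *   standard-mode tLOW  = 4700 ns -> ~470 cycles
     * The real helpers additionally fold in the SDA/SCL falling times and
     * subtract fixed per-IP offsets before the values hit the registers.
     */
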
@@ -627,6 +640,14 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
627 dev->abort_source = 0; 640 dev->abort_source = 0;
628 dev->rx_outstanding = 0; 641 dev->rx_outstanding = 0;
629 642
643 if (dev->acquire_lock) {
644 ret = dev->acquire_lock(dev);
645 if (ret) {
646 dev_err(dev->dev, "couldn't acquire bus ownership\n");
647 goto done_nolock;
648 }
649 }
650
630 ret = i2c_dw_wait_bus_not_busy(dev); 651 ret = i2c_dw_wait_bus_not_busy(dev);
631 if (ret < 0) 652 if (ret < 0)
632 goto done; 653 goto done;
@@ -672,6 +693,10 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
672 ret = -EIO; 693 ret = -EIO;
673 694
674done: 695done:
696 if (dev->release_lock)
697 dev->release_lock(dev);
698
699done_nolock:
675 pm_runtime_mark_last_busy(dev->dev); 700 pm_runtime_mark_last_busy(dev->dev);
676 pm_runtime_put_autosuspend(dev->dev); 701 pm_runtime_put_autosuspend(dev->dev);
677 mutex_unlock(&dev->lock); 702 mutex_unlock(&dev->lock);
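
The done/done_nolock pair introduced above is the standard two-label unwind: a failure before the hardware lock is taken must skip the release but still run the runtime-PM and mutex cleanup. The generic shape, with hypothetical types and helpers:

    static int xfer_guarded(struct bus_dev *b)
    {
        int ret;

        ret = bus_acquire(b);        /* may fail before we hold anything */
        if (ret)
            goto done_nolock;

        ret = bus_do_transfer(b);

        bus_release(b);              /* undo only what acquire did */
    done_nolock:
        bus_runtime_put(b);          /* cleanup that must always run */
        return ret;
    }
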
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 5a410ef17abd..9630222abf32 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -61,6 +61,9 @@
61 * @ss_lcnt: standard speed LCNT value 61 * @ss_lcnt: standard speed LCNT value
62 * @fs_hcnt: fast speed HCNT value 62 * @fs_hcnt: fast speed HCNT value
63 * @fs_lcnt: fast speed LCNT value 63 * @fs_lcnt: fast speed LCNT value
64 * @acquire_lock: function to acquire a hardware lock on the bus
65 * @release_lock: function to release a hardware lock on the bus
66 * @pm_runtime_disabled: true if pm runtime is disabled
64 * 67 *
65 * HCNT and LCNT parameters can be used if the platform knows more accurate 68 * HCNT and LCNT parameters can be used if the platform knows more accurate
66 * values than the one computed based only on the input clock frequency. 69 * values than the one computed based only on the input clock frequency.
@@ -101,6 +104,9 @@ struct dw_i2c_dev {
101 u16 ss_lcnt; 104 u16 ss_lcnt;
102 u16 fs_hcnt; 105 u16 fs_hcnt;
103 u16 fs_lcnt; 106 u16 fs_lcnt;
107 int (*acquire_lock)(struct dw_i2c_dev *dev);
108 void (*release_lock)(struct dw_i2c_dev *dev);
109 bool pm_runtime_disabled;
104}; 110};
105 111
106#define ACCESS_SWAP 0x00000001 112#define ACCESS_SWAP 0x00000001
@@ -119,3 +125,9 @@ extern void i2c_dw_disable(struct dw_i2c_dev *dev);
119extern void i2c_dw_clear_int(struct dw_i2c_dev *dev); 125extern void i2c_dw_clear_int(struct dw_i2c_dev *dev);
120extern void i2c_dw_disable_int(struct dw_i2c_dev *dev); 126extern void i2c_dw_disable_int(struct dw_i2c_dev *dev);
121extern u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev); 127extern u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev);
128
129#if IS_ENABLED(CONFIG_I2C_DESIGNWARE_BAYTRAIL)
130extern int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev);
131#else
132static inline int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) { return 0; }
133#endif
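
The #if IS_ENABLED() block above is the usual optional-feature stub: callers invoke i2c_dw_eval_lock_support() unconditionally, and when CONFIG_I2C_DESIGNWARE_BAYTRAIL is disabled the static inline returns 0 and compiles away entirely. The same shape for a hypothetical feature:

    #if IS_ENABLED(CONFIG_FOO_FEATURE)
    extern int foo_setup(struct foo_dev *dev);
    #else
    static inline int foo_setup(struct foo_dev *dev) { return 0; }
    #endif
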
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index acb40f95db78..6643d2dc0b25 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -6,7 +6,7 @@
6 * Copyright (C) 2006 Texas Instruments. 6 * Copyright (C) 2006 Texas Instruments.
7 * Copyright (C) 2007 MontaVista Software Inc. 7 * Copyright (C) 2007 MontaVista Software Inc.
8 * Copyright (C) 2009 Provigent Ltd. 8 * Copyright (C) 2009 Provigent Ltd.
9 * Copyright (C) 2011 Intel corporation. 9 * Copyright (C) 2011, 2015 Intel Corporation.
10 * 10 *
11 * ---------------------------------------------------------------------------- 11 * ----------------------------------------------------------------------------
12 * 12 *
@@ -40,10 +40,6 @@
40#define DRIVER_NAME "i2c-designware-pci" 40#define DRIVER_NAME "i2c-designware-pci"
41 41
42enum dw_pci_ctl_id_t { 42enum dw_pci_ctl_id_t {
43 moorestown_0,
44 moorestown_1,
45 moorestown_2,
46
47 medfield_0, 43 medfield_0,
48 medfield_1, 44 medfield_1,
49 medfield_2, 45 medfield_2,
@@ -101,28 +97,7 @@ static struct dw_scl_sda_cfg hsw_config = {
101 .sda_hold = 0x9, 97 .sda_hold = 0x9,
102}; 98};
103 99
104static struct dw_pci_controller dw_pci_controllers[] = { 100static struct dw_pci_controller dw_pci_controllers[] = {
105 [moorestown_0] = {
106 .bus_num = 0,
107 .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
108 .tx_fifo_depth = 32,
109 .rx_fifo_depth = 32,
110 .clk_khz = 25000,
111 },
112 [moorestown_1] = {
113 .bus_num = 1,
114 .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
115 .tx_fifo_depth = 32,
116 .rx_fifo_depth = 32,
117 .clk_khz = 25000,
118 },
119 [moorestown_2] = {
120 .bus_num = 2,
121 .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
122 .tx_fifo_depth = 32,
123 .rx_fifo_depth = 32,
124 .clk_khz = 25000,
125 },
126 [medfield_0] = { 101 [medfield_0] = {
127 .bus_num = 0, 102 .bus_num = 0,
128 .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, 103 .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
@@ -170,7 +145,6 @@ static struct dw_pci_controller dw_pci_controllers[] = {
170 .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, 145 .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
171 .tx_fifo_depth = 32, 146 .tx_fifo_depth = 32,
172 .rx_fifo_depth = 32, 147 .rx_fifo_depth = 32,
173 .clk_khz = 100000,
174 .functionality = I2C_FUNC_10BIT_ADDR, 148 .functionality = I2C_FUNC_10BIT_ADDR,
175 .scl_sda_cfg = &byt_config, 149 .scl_sda_cfg = &byt_config,
176 }, 150 },
@@ -179,7 +153,6 @@ static struct dw_pci_controller dw_pci_controllers[] = {
179 .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, 153 .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
180 .tx_fifo_depth = 32, 154 .tx_fifo_depth = 32,
181 .rx_fifo_depth = 32, 155 .rx_fifo_depth = 32,
182 .clk_khz = 100000,
183 .functionality = I2C_FUNC_10BIT_ADDR, 156 .functionality = I2C_FUNC_10BIT_ADDR,
184 .scl_sda_cfg = &hsw_config, 157 .scl_sda_cfg = &hsw_config,
185 }, 158 },
@@ -259,7 +232,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
259 dev->functionality = controller->functionality | 232 dev->functionality = controller->functionality |
260 DW_DEFAULT_FUNCTIONALITY; 233 DW_DEFAULT_FUNCTIONALITY;
261 234
262 dev->master_cfg = controller->bus_cfg; 235 dev->master_cfg = controller->bus_cfg;
263 if (controller->scl_sda_cfg) { 236 if (controller->scl_sda_cfg) {
264 cfg = controller->scl_sda_cfg; 237 cfg = controller->scl_sda_cfg;
265 dev->ss_hcnt = cfg->ss_hcnt; 238 dev->ss_hcnt = cfg->ss_hcnt;
@@ -325,12 +298,8 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev)
325MODULE_ALIAS("i2c_designware-pci"); 298MODULE_ALIAS("i2c_designware-pci");
326 299
327static const struct pci_device_id i2_designware_pci_ids[] = { 300static const struct pci_device_id i2_designware_pci_ids[] = {
328 /* Moorestown */
329 { PCI_VDEVICE(INTEL, 0x0802), moorestown_0 },
330 { PCI_VDEVICE(INTEL, 0x0803), moorestown_1 },
331 { PCI_VDEVICE(INTEL, 0x0804), moorestown_2 },
332 /* Medfield */ 301 /* Medfield */
333 { PCI_VDEVICE(INTEL, 0x0817), medfield_3,}, 302 { PCI_VDEVICE(INTEL, 0x0817), medfield_3 },
334 { PCI_VDEVICE(INTEL, 0x0818), medfield_4 }, 303 { PCI_VDEVICE(INTEL, 0x0818), medfield_4 },
335 { PCI_VDEVICE(INTEL, 0x0819), medfield_5 }, 304 { PCI_VDEVICE(INTEL, 0x0819), medfield_5 },
336 { PCI_VDEVICE(INTEL, 0x082C), medfield_0 }, 305 { PCI_VDEVICE(INTEL, 0x082C), medfield_0 },
@@ -348,7 +317,7 @@ static const struct pci_device_id i2_designware_pci_ids[] = {
348 { PCI_VDEVICE(INTEL, 0x9c61), haswell }, 317 { PCI_VDEVICE(INTEL, 0x9c61), haswell },
349 { PCI_VDEVICE(INTEL, 0x9c62), haswell }, 318 { PCI_VDEVICE(INTEL, 0x9c62), haswell },
350 /* Braswell / Cherrytrail */ 319 /* Braswell / Cherrytrail */
351 { PCI_VDEVICE(INTEL, 0x22C1), baytrail,}, 320 { PCI_VDEVICE(INTEL, 0x22C1), baytrail },
352 { PCI_VDEVICE(INTEL, 0x22C2), baytrail }, 321 { PCI_VDEVICE(INTEL, 0x22C2), baytrail },
353 { PCI_VDEVICE(INTEL, 0x22C3), baytrail }, 322 { PCI_VDEVICE(INTEL, 0x22C3), baytrail },
354 { PCI_VDEVICE(INTEL, 0x22C4), baytrail }, 323 { PCI_VDEVICE(INTEL, 0x22C4), baytrail },
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 2b463c313e4e..c270f5f9a8f9 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -195,6 +195,10 @@ static int dw_i2c_probe(struct platform_device *pdev)
195 clk_freq = pdata->i2c_scl_freq; 195 clk_freq = pdata->i2c_scl_freq;
196 } 196 }
197 197
198 r = i2c_dw_eval_lock_support(dev);
199 if (r)
200 return r;
201
198 dev->functionality = 202 dev->functionality =
199 I2C_FUNC_I2C | 203 I2C_FUNC_I2C |
200 I2C_FUNC_10BIT_ADDR | 204 I2C_FUNC_10BIT_ADDR |
@@ -257,10 +261,14 @@ static int dw_i2c_probe(struct platform_device *pdev)
257 return r; 261 return r;
258 } 262 }
259 263
260 pm_runtime_set_autosuspend_delay(&pdev->dev, 1000); 264 if (dev->pm_runtime_disabled) {
261 pm_runtime_use_autosuspend(&pdev->dev); 265 pm_runtime_forbid(&pdev->dev);
262 pm_runtime_set_active(&pdev->dev); 266 } else {
263 pm_runtime_enable(&pdev->dev); 267 pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
268 pm_runtime_use_autosuspend(&pdev->dev);
269 pm_runtime_set_active(&pdev->dev);
270 pm_runtime_enable(&pdev->dev);
271 }
264 272
265 return 0; 273 return 0;
266} 274}
@@ -310,7 +318,9 @@ static int dw_i2c_resume(struct device *dev)
310 struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev); 318 struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
311 319
312 clk_prepare_enable(i_dev->clk); 320 clk_prepare_enable(i_dev->clk);
313 i2c_dw_init(i_dev); 321
322 if (!i_dev->pm_runtime_disabled)
323 i2c_dw_init(i_dev);
314 324
315 return 0; 325 return 0;
316} 326}
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 7f3a9fe9bf4e..d7b26fc6f432 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -201,7 +201,7 @@ struct imx_i2c_struct {
201 void __iomem *base; 201 void __iomem *base;
202 wait_queue_head_t queue; 202 wait_queue_head_t queue;
203 unsigned long i2csr; 203 unsigned long i2csr;
204 unsigned int disable_delay; 204 unsigned int disable_delay;
205 int stopped; 205 int stopped;
206 unsigned int ifdr; /* IMX_I2C_IFDR */ 206 unsigned int ifdr; /* IMX_I2C_IFDR */
207 unsigned int cur_clk; 207 unsigned int cur_clk;
@@ -295,7 +295,6 @@ static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
295 dma->chan_tx = dma_request_slave_channel(dev, "tx"); 295 dma->chan_tx = dma_request_slave_channel(dev, "tx");
296 if (!dma->chan_tx) { 296 if (!dma->chan_tx) {
297 dev_dbg(dev, "can't request DMA tx channel\n"); 297 dev_dbg(dev, "can't request DMA tx channel\n");
298 ret = -ENODEV;
299 goto fail_al; 298 goto fail_al;
300 } 299 }
301 300
@@ -313,7 +312,6 @@ static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
313 dma->chan_rx = dma_request_slave_channel(dev, "rx"); 312 dma->chan_rx = dma_request_slave_channel(dev, "rx");
314 if (!dma->chan_rx) { 313 if (!dma->chan_rx) {
315 dev_dbg(dev, "can't request DMA rx channel\n"); 314 dev_dbg(dev, "can't request DMA rx channel\n");
316 ret = -ENODEV;
317 goto fail_tx; 315 goto fail_tx;
318 } 316 }
319 317
@@ -481,8 +479,8 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx)
481 i2c_clk_rate = clk_get_rate(i2c_imx->clk); 479 i2c_clk_rate = clk_get_rate(i2c_imx->clk);
482 if (i2c_imx->cur_clk == i2c_clk_rate) 480 if (i2c_imx->cur_clk == i2c_clk_rate)
483 return; 481 return;
484 else 482
485 i2c_imx->cur_clk = i2c_clk_rate; 483 i2c_imx->cur_clk = i2c_clk_rate;
486 484
487 div = (i2c_clk_rate + i2c_imx->bitrate - 1) / i2c_imx->bitrate; 485 div = (i2c_clk_rate + i2c_imx->bitrate - 1) / i2c_imx->bitrate;
488 if (div < i2c_clk_div[0].div) 486 if (div < i2c_clk_div[0].div)
@@ -490,7 +488,8 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx)
490 else if (div > i2c_clk_div[i2c_imx->hwdata->ndivs - 1].div) 488 else if (div > i2c_clk_div[i2c_imx->hwdata->ndivs - 1].div)
491 i = i2c_imx->hwdata->ndivs - 1; 489 i = i2c_imx->hwdata->ndivs - 1;
492 else 490 else
493 for (i = 0; i2c_clk_div[i].div < div; i++); 491 for (i = 0; i2c_clk_div[i].div < div; i++)
492 ;
494 493
495 /* Store divider value */ 494 /* Store divider value */
496 i2c_imx->ifdr = i2c_clk_div[i].val; 495 i2c_imx->ifdr = i2c_clk_div[i].val;
@@ -628,9 +627,9 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
628 result = wait_for_completion_timeout( 627 result = wait_for_completion_timeout(
629 &i2c_imx->dma->cmd_complete, 628 &i2c_imx->dma->cmd_complete,
630 msecs_to_jiffies(DMA_TIMEOUT)); 629 msecs_to_jiffies(DMA_TIMEOUT));
631 if (result <= 0) { 630 if (result == 0) {
632 dmaengine_terminate_all(dma->chan_using); 631 dmaengine_terminate_all(dma->chan_using);
633 return result ?: -ETIMEDOUT; 632 return -ETIMEDOUT;
634 } 633 }
635 634
636 /* Waiting for transfer complete. */ 635 /* Waiting for transfer complete. */
@@ -686,9 +685,9 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
686 result = wait_for_completion_timeout( 685 result = wait_for_completion_timeout(
687 &i2c_imx->dma->cmd_complete, 686 &i2c_imx->dma->cmd_complete,
688 msecs_to_jiffies(DMA_TIMEOUT)); 687 msecs_to_jiffies(DMA_TIMEOUT));
689 if (result <= 0) { 688 if (result == 0) {
690 dmaengine_terminate_all(dma->chan_using); 689 dmaengine_terminate_all(dma->chan_using);
691 return result ?: -ETIMEDOUT; 690 return -ETIMEDOUT;
692 } 691 }
693 692
694 /* waiting for transfer complete. */ 693 /* waiting for transfer complete. */
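
Both hunks above tighten the timeout test for the same reason: wait_for_completion_timeout() returns an unsigned long — 0 on timeout, otherwise the remaining jiffies — so it can never be negative and the old "result <= 0" comparison was misleading. Minimal usage sketch (done is a hypothetical completion):

    unsigned long time_left;

    time_left = wait_for_completion_timeout(&done, msecs_to_jiffies(1000));
    if (time_left == 0)
        return -ETIMEDOUT;    /* nothing was signalled in time */
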
@@ -822,6 +821,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
822 /* read data */ 821 /* read data */
823 for (i = 0; i < msgs->len; i++) { 822 for (i = 0; i < msgs->len; i++) {
824 u8 len = 0; 823 u8 len = 0;
824
825 result = i2c_imx_trx_complete(i2c_imx); 825 result = i2c_imx_trx_complete(i2c_imx);
826 if (result) 826 if (result)
827 return result; 827 return result;
@@ -917,15 +917,16 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
917 /* write/read data */ 917 /* write/read data */
918#ifdef CONFIG_I2C_DEBUG_BUS 918#ifdef CONFIG_I2C_DEBUG_BUS
919 temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); 919 temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
920 dev_dbg(&i2c_imx->adapter.dev, "<%s> CONTROL: IEN=%d, IIEN=%d, " 920 dev_dbg(&i2c_imx->adapter.dev,
921 "MSTA=%d, MTX=%d, TXAK=%d, RSTA=%d\n", __func__, 921 "<%s> CONTROL: IEN=%d, IIEN=%d, MSTA=%d, MTX=%d, TXAK=%d, RSTA=%d\n",
922 __func__,
922 (temp & I2CR_IEN ? 1 : 0), (temp & I2CR_IIEN ? 1 : 0), 923 (temp & I2CR_IEN ? 1 : 0), (temp & I2CR_IIEN ? 1 : 0),
923 (temp & I2CR_MSTA ? 1 : 0), (temp & I2CR_MTX ? 1 : 0), 924 (temp & I2CR_MSTA ? 1 : 0), (temp & I2CR_MTX ? 1 : 0),
924 (temp & I2CR_TXAK ? 1 : 0), (temp & I2CR_RSTA ? 1 : 0)); 925 (temp & I2CR_TXAK ? 1 : 0), (temp & I2CR_RSTA ? 1 : 0));
925 temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); 926 temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
926 dev_dbg(&i2c_imx->adapter.dev, 927 dev_dbg(&i2c_imx->adapter.dev,
927 "<%s> STATUS: ICF=%d, IAAS=%d, IBB=%d, " 928 "<%s> STATUS: ICF=%d, IAAS=%d, IBB=%d, IAL=%d, SRW=%d, IIF=%d, RXAK=%d\n",
928 "IAL=%d, SRW=%d, IIF=%d, RXAK=%d\n", __func__, 929 __func__,
929 (temp & I2SR_ICF ? 1 : 0), (temp & I2SR_IAAS ? 1 : 0), 930 (temp & I2SR_ICF ? 1 : 0), (temp & I2SR_IAAS ? 1 : 0),
930 (temp & I2SR_IBB ? 1 : 0), (temp & I2SR_IAL ? 1 : 0), 931 (temp & I2SR_IBB ? 1 : 0), (temp & I2SR_IAL ? 1 : 0),
931 (temp & I2SR_SRW ? 1 : 0), (temp & I2SR_IIF ? 1 : 0), 932 (temp & I2SR_SRW ? 1 : 0), (temp & I2SR_IIF ? 1 : 0),
@@ -1004,7 +1005,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
1004 i2c_imx->adapter.owner = THIS_MODULE; 1005 i2c_imx->adapter.owner = THIS_MODULE;
1005 i2c_imx->adapter.algo = &i2c_imx_algo; 1006 i2c_imx->adapter.algo = &i2c_imx_algo;
1006 i2c_imx->adapter.dev.parent = &pdev->dev; 1007 i2c_imx->adapter.dev.parent = &pdev->dev;
1007 i2c_imx->adapter.nr = pdev->id; 1008 i2c_imx->adapter.nr = pdev->id;
1008 i2c_imx->adapter.dev.of_node = pdev->dev.of_node; 1009 i2c_imx->adapter.dev.of_node = pdev->dev.of_node;
1009 i2c_imx->base = base; 1010 i2c_imx->base = base;
1010 1011
@@ -1063,7 +1064,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
1063 i2c_imx->adapter.name); 1064 i2c_imx->adapter.name);
1064 dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); 1065 dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
1065 1066
1066 /* Init DMA config if support*/ 1067 /* Init DMA config if supported */
1067 i2c_imx_dma_request(i2c_imx, phy_addr); 1068 i2c_imx_dma_request(i2c_imx, phy_addr);
1068 1069
1069 return 0; /* Return OK */ 1070 return 0; /* Return OK */
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 7249b5b1e5d0..abf5db7e441e 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -12,6 +12,7 @@
12 * kind, whether express or implied. 12 * kind, whether express or implied.
13 */ 13 */
14 14
15#include <linux/clk.h>
15#include <linux/err.h> 16#include <linux/err.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/module.h> 18#include <linux/module.h>
@@ -35,7 +36,9 @@ struct ocores_i2c {
35 int pos; 36 int pos;
36 int nmsgs; 37 int nmsgs;
37 int state; /* see STATE_ */ 38 int state; /* see STATE_ */
38 int clock_khz; 39 struct clk *clk;
40 int ip_clock_khz;
41 int bus_clock_khz;
39 void (*setreg)(struct ocores_i2c *i2c, int reg, u8 value); 42 void (*setreg)(struct ocores_i2c *i2c, int reg, u8 value);
40 u8 (*getreg)(struct ocores_i2c *i2c, int reg); 43 u8 (*getreg)(struct ocores_i2c *i2c, int reg);
41}; 44};
@@ -215,21 +218,34 @@ static int ocores_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
215 return -ETIMEDOUT; 218 return -ETIMEDOUT;
216} 219}
217 220
218static void ocores_init(struct ocores_i2c *i2c) 221static int ocores_init(struct device *dev, struct ocores_i2c *i2c)
219{ 222{
220 int prescale; 223 int prescale;
224 int diff;
221 u8 ctrl = oc_getreg(i2c, OCI2C_CONTROL); 225 u8 ctrl = oc_getreg(i2c, OCI2C_CONTROL);
222 226
223 /* make sure the device is disabled */ 227 /* make sure the device is disabled */
224 oc_setreg(i2c, OCI2C_CONTROL, ctrl & ~(OCI2C_CTRL_EN|OCI2C_CTRL_IEN)); 228 oc_setreg(i2c, OCI2C_CONTROL, ctrl & ~(OCI2C_CTRL_EN|OCI2C_CTRL_IEN));
225 229
226 prescale = (i2c->clock_khz / (5*100)) - 1; 230 prescale = (i2c->ip_clock_khz / (5 * i2c->bus_clock_khz)) - 1;
231 prescale = clamp(prescale, 0, 0xffff);
232
233 diff = i2c->ip_clock_khz / (5 * (prescale + 1)) - i2c->bus_clock_khz;
234 if (abs(diff) > i2c->bus_clock_khz / 10) {
235 dev_err(dev,
236 "Unsupported clock settings: core: %d KHz, bus: %d KHz\n",
237 i2c->ip_clock_khz, i2c->bus_clock_khz);
238 return -EINVAL;
239 }
240
227 oc_setreg(i2c, OCI2C_PRELOW, prescale & 0xff); 241 oc_setreg(i2c, OCI2C_PRELOW, prescale & 0xff);
228 oc_setreg(i2c, OCI2C_PREHIGH, prescale >> 8); 242 oc_setreg(i2c, OCI2C_PREHIGH, prescale >> 8);
229 243
230 /* Init the device */ 244 /* Init the device */
231 oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK); 245 oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
232 oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_IEN | OCI2C_CTRL_EN); 246 oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_IEN | OCI2C_CTRL_EN);
247
248 return 0;
233} 249}
234 250
235 251
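
The prescale rule above comes straight from how the core derives SCL — ip_clk / (5 * (prescale + 1)) — with the new code rejecting settings that land more than 10% off the requested bus clock. Worked numbers for a hypothetical 50 MHz core clock and the default 100 kHz bus:

    int ip_clock_khz = 50000;
    int bus_clock_khz = 100;
    int prescale = ip_clock_khz / (5 * bus_clock_khz) - 1;    /* = 99 */
    int actual = ip_clock_khz / (5 * (prescale + 1));         /* = 100, diff 0 */
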
@@ -304,6 +320,8 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
304 struct device_node *np = pdev->dev.of_node; 320 struct device_node *np = pdev->dev.of_node;
305 const struct of_device_id *match; 321 const struct of_device_id *match;
306 u32 val; 322 u32 val;
323 u32 clock_frequency;
324 bool clock_frequency_present;
307 325
308 if (of_property_read_u32(np, "reg-shift", &i2c->reg_shift)) { 326 if (of_property_read_u32(np, "reg-shift", &i2c->reg_shift)) {
309 /* no 'reg-shift', check for deprecated 'regstep' */ 327 /* no 'reg-shift', check for deprecated 'regstep' */
@@ -319,12 +337,42 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
319 } 337 }
320 } 338 }
321 339
322 if (of_property_read_u32(np, "clock-frequency", &val)) { 340 clock_frequency_present = !of_property_read_u32(np, "clock-frequency",
323 dev_err(&pdev->dev, 341 &clock_frequency);
324 "Missing required parameter 'clock-frequency'\n"); 342 i2c->bus_clock_khz = 100;
325 return -ENODEV; 343
344 i2c->clk = devm_clk_get(&pdev->dev, NULL);
345
346 if (!IS_ERR(i2c->clk)) {
347 int ret = clk_prepare_enable(i2c->clk);
348
349 if (ret) {
350 dev_err(&pdev->dev,
351 "clk_prepare_enable failed: %d\n", ret);
352 return ret;
353 }
354 i2c->ip_clock_khz = clk_get_rate(i2c->clk) / 1000;
355 if (clock_frequency_present)
356 i2c->bus_clock_khz = clock_frequency / 1000;
357 }
358
359 if (i2c->ip_clock_khz == 0) {
360 if (of_property_read_u32(np, "opencores,ip-clock-frequency",
361 &val)) {
362 if (!clock_frequency_present) {
363 dev_err(&pdev->dev,
364 "Missing required parameter 'opencores,ip-clock-frequency'\n");
365 return -ENODEV;
366 }
367 i2c->ip_clock_khz = clock_frequency / 1000;
368 dev_warn(&pdev->dev,
369 "Deprecated usage of the 'clock-frequency' property, please update to 'opencores,ip-clock-frequency'\n");
370 } else {
371 i2c->ip_clock_khz = val / 1000;
372 if (clock_frequency_present)
373 i2c->bus_clock_khz = clock_frequency / 1000;
374 }
326 } 375 }
327 i2c->clock_khz = val / 1000;
328 376
329 of_property_read_u32(pdev->dev.of_node, "reg-io-width", 377 of_property_read_u32(pdev->dev.of_node, "reg-io-width",
330 &i2c->reg_io_width); 378 &i2c->reg_io_width);
@@ -368,7 +416,8 @@ static int ocores_i2c_probe(struct platform_device *pdev)
368 if (pdata) { 416 if (pdata) {
369 i2c->reg_shift = pdata->reg_shift; 417 i2c->reg_shift = pdata->reg_shift;
370 i2c->reg_io_width = pdata->reg_io_width; 418 i2c->reg_io_width = pdata->reg_io_width;
371 i2c->clock_khz = pdata->clock_khz; 419 i2c->ip_clock_khz = pdata->clock_khz;
420 i2c->bus_clock_khz = 100;
372 } else { 421 } else {
373 ret = ocores_i2c_of_probe(pdev, i2c); 422 ret = ocores_i2c_of_probe(pdev, i2c);
374 if (ret) 423 if (ret)
@@ -402,7 +451,9 @@ static int ocores_i2c_probe(struct platform_device *pdev)
402 } 451 }
403 } 452 }
404 453
405 ocores_init(i2c); 454 ret = ocores_init(&pdev->dev, i2c);
455 if (ret)
456 return ret;
406 457
407 init_waitqueue_head(&i2c->wait); 458 init_waitqueue_head(&i2c->wait);
408 ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0, 459 ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0,
@@ -446,6 +497,9 @@ static int ocores_i2c_remove(struct platform_device *pdev)
446 /* remove adapter & data */ 497 /* remove adapter & data */
447 i2c_del_adapter(&i2c->adap); 498 i2c_del_adapter(&i2c->adap);
448 499
500 if (!IS_ERR(i2c->clk))
501 clk_disable_unprepare(i2c->clk);
502
449 return 0; 503 return 0;
450} 504}
451 505
@@ -458,6 +512,8 @@ static int ocores_i2c_suspend(struct device *dev)
458 /* make sure the device is disabled */ 512 /* make sure the device is disabled */
459 oc_setreg(i2c, OCI2C_CONTROL, ctrl & ~(OCI2C_CTRL_EN|OCI2C_CTRL_IEN)); 513 oc_setreg(i2c, OCI2C_CONTROL, ctrl & ~(OCI2C_CTRL_EN|OCI2C_CTRL_IEN));
460 514
515 if (!IS_ERR(i2c->clk))
516 clk_disable_unprepare(i2c->clk);
461 return 0; 517 return 0;
462} 518}
463 519
@@ -465,9 +521,20 @@ static int ocores_i2c_resume(struct device *dev)
465{ 521{
466 struct ocores_i2c *i2c = dev_get_drvdata(dev); 522 struct ocores_i2c *i2c = dev_get_drvdata(dev);
467 523
468 ocores_init(i2c); 524 if (!IS_ERR(i2c->clk)) {
525 unsigned long rate;
526 int ret = clk_prepare_enable(i2c->clk);
469 527
470 return 0; 528 if (ret) {
529 dev_err(dev,
530 "clk_prepare_enable failed: %d\n", ret);
531 return ret;
532 }
533 rate = clk_get_rate(i2c->clk) / 1000;
534 if (rate)
535 i2c->ip_clock_khz = rate;
536 }
537 return ocores_init(dev, i2c);
471} 538}
472 539
473static SIMPLE_DEV_PM_OPS(ocores_i2c_pm, ocores_i2c_suspend, ocores_i2c_resume); 540static SIMPLE_DEV_PM_OPS(ocores_i2c_pm, ocores_i2c_suspend, ocores_i2c_resume);
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 44f03eed00dd..d37d9db6681e 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -148,13 +148,6 @@ static inline u32 pmcmsptwi_clock_to_reg(
148 return ((clock->filter & 0xf) << 12) | (clock->clock & 0x03ff); 148 return ((clock->filter & 0xf) << 12) | (clock->clock & 0x03ff);
149} 149}
150 150
151static inline void pmcmsptwi_reg_to_clock(
152 u32 reg, struct pmcmsptwi_clock *clock)
153{
154 clock->filter = (reg >> 12) & 0xf;
155 clock->clock = reg & 0x03ff;
156}
157
158static inline u32 pmcmsptwi_cfg_to_reg(const struct pmcmsptwi_cfg *cfg) 151static inline u32 pmcmsptwi_cfg_to_reg(const struct pmcmsptwi_cfg *cfg)
159{ 152{
160 return ((cfg->arbf & 0xf) << 12) | 153 return ((cfg->arbf & 0xf) << 12) |
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 92462843db66..5f96b1b3e3a5 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -102,6 +102,9 @@ struct rk3x_i2c {
102 102
103 /* Settings */ 103 /* Settings */
104 unsigned int scl_frequency; 104 unsigned int scl_frequency;
105 unsigned int scl_rise_ns;
106 unsigned int scl_fall_ns;
107 unsigned int sda_fall_ns;
105 108
106 /* Synchronization & notification */ 109 /* Synchronization & notification */
107 spinlock_t lock; 110 spinlock_t lock;
@@ -435,6 +438,9 @@ out:
435 * 438 *
436 * @clk_rate: I2C input clock rate 439 * @clk_rate: I2C input clock rate
437 * @scl_rate: Desired SCL rate 440 * @scl_rate: Desired SCL rate
441 * @scl_rise_ns: How many ns it takes for SCL to rise.
442 * @scl_fall_ns: How many ns it takes for SCL to fall.
443 * @sda_fall_ns: How many ns it takes for SDA to fall.
438 * @div_low: Divider output for low 444 * @div_low: Divider output for low
439 * @div_high: Divider output for high 445 * @div_high: Divider output for high
440 * 446 *
@@ -443,11 +449,16 @@ out:
443 * too high, we silently use the highest possible rate. 449 * too high, we silently use the highest possible rate.
444 */ 450 */
445static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate, 451static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
452 unsigned long scl_rise_ns,
453 unsigned long scl_fall_ns,
454 unsigned long sda_fall_ns,
446 unsigned long *div_low, unsigned long *div_high) 455 unsigned long *div_low, unsigned long *div_high)
447{ 456{
448 unsigned long min_low_ns, min_high_ns; 457 unsigned long spec_min_low_ns, spec_min_high_ns;
449 unsigned long max_data_hold_ns; 458 unsigned long spec_setup_start, spec_max_data_hold_ns;
450 unsigned long data_hold_buffer_ns; 459 unsigned long data_hold_buffer_ns;
460
461 unsigned long min_low_ns, min_high_ns;
451 unsigned long max_low_ns, min_total_ns; 462 unsigned long max_low_ns, min_total_ns;
452 463
453 unsigned long clk_rate_khz, scl_rate_khz; 464 unsigned long clk_rate_khz, scl_rate_khz;
@@ -469,29 +480,50 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
469 scl_rate = 1000; 480 scl_rate = 1000;
470 481
471 /* 482 /*
472 * min_low_ns: The minimum number of ns we need to hold low 483 * min_low_ns: The minimum number of ns we need to hold low to
473 * to meet i2c spec 484 * meet I2C specification, should include fall time.
474 * min_high_ns: The minimum number of ns we need to hold high 485 * min_high_ns: The minimum number of ns we need to hold high to
475 * to meet i2c spec 486 * meet I2C specification, should include rise time.
476 * max_low_ns: The maximum number of ns we can hold low 487 * max_low_ns: The maximum number of ns we can hold low to meet
477 * to meet i2c spec 488 * I2C specification.
478 * 489 *
479 * Note: max_low_ns should be (max data hold time * 2 - buffer) 490 * Note: max_low_ns should be (maximum data hold time * 2 - buffer)
480 * This is because the i2c host on Rockchip holds the data line 491 * This is because the i2c host on Rockchip holds the data line
481 * for half the low time. 492 * for half the low time.
482 */ 493 */
483 if (scl_rate <= 100000) { 494 if (scl_rate <= 100000) {
484 min_low_ns = 4700; 495 /* Standard-mode */
485 min_high_ns = 4000; 496 spec_min_low_ns = 4700;
486 max_data_hold_ns = 3450; 497 spec_setup_start = 4700;
498 spec_min_high_ns = 4000;
499 spec_max_data_hold_ns = 3450;
487 data_hold_buffer_ns = 50; 500 data_hold_buffer_ns = 50;
488 } else { 501 } else {
489 min_low_ns = 1300; 502 /* Fast-mode */
490 min_high_ns = 600; 503 spec_min_low_ns = 1300;
491 max_data_hold_ns = 900; 504 spec_setup_start = 600;
505 spec_min_high_ns = 600;
506 spec_max_data_hold_ns = 900;
492 data_hold_buffer_ns = 50; 507 data_hold_buffer_ns = 50;
493 } 508 }
494 max_low_ns = max_data_hold_ns * 2 - data_hold_buffer_ns; 509 min_high_ns = scl_rise_ns + spec_min_high_ns;
510
511 /*
512 * Timings for repeated start:
513 * - controller appears to drop SDA at .875x (7/8) programmed clk high.
514 * - controller appears to keep SCL high for 2x programmed clk high.
515 *
516 * We need to account for those rules in picking our "high" time so
517 * we meet tSU;STA and tHD;STA times.
518 */
519 min_high_ns = max(min_high_ns,
520 DIV_ROUND_UP((scl_rise_ns + spec_setup_start) * 1000, 875));
521 min_high_ns = max(min_high_ns,
522 DIV_ROUND_UP((scl_rise_ns + spec_setup_start +
523 sda_fall_ns + spec_min_high_ns), 2));
524
525 min_low_ns = scl_fall_ns + spec_min_low_ns;
526 max_low_ns = spec_max_data_hold_ns * 2 - data_hold_buffer_ns;
495 min_total_ns = min_low_ns + min_high_ns; 527 min_total_ns = min_low_ns + min_high_ns;
496 528
497 /* Adjust to avoid overflow */ 529 /* Adjust to avoid overflow */
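
Plugging defaults into the three rules makes the repeated-start handling concrete. Worked example with default-like values (standard mode, scl_rise_ns = 1000, sda_fall_ns = 300):

    /*
     * rise + spec high:  1000 + 4000                              = 5000 ns
     * 7/8 SDA-drop rule: DIV_ROUND_UP((1000 + 4700) * 1000, 875)  = 6515 ns
     * 2x SCL-high rule:  DIV_ROUND_UP(1000 + 4700 + 300 + 4000, 2) = 5000 ns
     *
     * min_high_ns = max of the three = 6515 ns: the 7/8 rule dominates,
     * stretching SCL high well past the bare specification value.
     */
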
@@ -510,8 +542,8 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
510 min_div_for_hold = (min_low_div + min_high_div); 542 min_div_for_hold = (min_low_div + min_high_div);
511 543
512 /* 544 /*
513 * This is the maximum divider so we don't go over the max. 545 * This is the maximum divider so we don't go over the maximum.
514 * We don't round up here (we round down) since this is a max. 546 * We don't round up here (we round down) since this is a maximum.
515 */ 547 */
516 max_low_div = clk_rate_khz * max_low_ns / (8 * 1000000); 548 max_low_div = clk_rate_khz * max_low_ns / (8 * 1000000);
517 549
@@ -544,7 +576,7 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
544 ideal_low_div = DIV_ROUND_UP(clk_rate_khz * min_low_ns, 576 ideal_low_div = DIV_ROUND_UP(clk_rate_khz * min_low_ns,
545 scl_rate_khz * 8 * min_total_ns); 577 scl_rate_khz * 8 * min_total_ns);
546 578
547 /* Don't allow it to go over the max */ 579 /* Don't allow it to go over the maximum */
548 if (ideal_low_div > max_low_div) 580 if (ideal_low_div > max_low_div)
549 ideal_low_div = max_low_div; 581 ideal_low_div = max_low_div;
550 582
@@ -588,9 +620,9 @@ static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate)
588 u64 t_low_ns, t_high_ns; 620 u64 t_low_ns, t_high_ns;
589 int ret; 621 int ret;
590 622
591 ret = rk3x_i2c_calc_divs(clk_rate, i2c->scl_frequency, &div_low, 623 ret = rk3x_i2c_calc_divs(clk_rate, i2c->scl_frequency, i2c->scl_rise_ns,
592 &div_high); 624 i2c->scl_fall_ns, i2c->sda_fall_ns,
593 625 &div_low, &div_high);
594 WARN_ONCE(ret != 0, "Could not reach SCL freq %u", i2c->scl_frequency); 626 WARN_ONCE(ret != 0, "Could not reach SCL freq %u", i2c->scl_frequency);
595 627
596 clk_enable(i2c->clk); 628 clk_enable(i2c->clk);
@@ -633,9 +665,10 @@ static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
633 switch (event) { 665 switch (event) {
634 case PRE_RATE_CHANGE: 666 case PRE_RATE_CHANGE:
635 if (rk3x_i2c_calc_divs(ndata->new_rate, i2c->scl_frequency, 667 if (rk3x_i2c_calc_divs(ndata->new_rate, i2c->scl_frequency,
636 &div_low, &div_high) != 0) { 668 i2c->scl_rise_ns, i2c->scl_fall_ns,
669 i2c->sda_fall_ns,
670 &div_low, &div_high) != 0)
637 return NOTIFY_STOP; 671 return NOTIFY_STOP;
638 }
639 672
640 /* scale up */ 673 /* scale up */
641 if (ndata->new_rate > ndata->old_rate) 674 if (ndata->new_rate > ndata->old_rate)
@@ -859,6 +892,24 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
859 i2c->scl_frequency = DEFAULT_SCL_RATE; 892 i2c->scl_frequency = DEFAULT_SCL_RATE;
860 } 893 }
861 894
895 /*
896 * Read rise and fall time from device tree. If not available use
897 * the default maximum timing from the specification.
898 */
899 if (of_property_read_u32(pdev->dev.of_node, "i2c-scl-rising-time-ns",
900 &i2c->scl_rise_ns)) {
901 if (i2c->scl_frequency <= 100000)
902 i2c->scl_rise_ns = 1000;
903 else
904 i2c->scl_rise_ns = 300;
905 }
906 if (of_property_read_u32(pdev->dev.of_node, "i2c-scl-falling-time-ns",
907 &i2c->scl_fall_ns))
908 i2c->scl_fall_ns = 300;
909 if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns",
910 &i2c->scl_fall_ns))
911 i2c->sda_fall_ns = i2c->scl_fall_ns;
912
862 strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name)); 913 strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
863 i2c->adap.owner = THIS_MODULE; 914 i2c->adap.owner = THIS_MODULE;
864 i2c->adap.algo = &rk3x_i2c_algorithm; 915 i2c->adap.algo = &rk3x_i2c_algorithm;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 28b87e683503..29f14331dd9d 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -286,6 +286,7 @@ static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev)
286 if (rx_fifo_avail > 0 && buf_remaining > 0) { 286 if (rx_fifo_avail > 0 && buf_remaining > 0) {
287 BUG_ON(buf_remaining > 3); 287 BUG_ON(buf_remaining > 3);
288 val = i2c_readl(i2c_dev, I2C_RX_FIFO); 288 val = i2c_readl(i2c_dev, I2C_RX_FIFO);
289 val = cpu_to_le32(val);
289 memcpy(buf, &val, buf_remaining); 290 memcpy(buf, &val, buf_remaining);
290 buf_remaining = 0; 291 buf_remaining = 0;
291 rx_fifo_avail--; 292 rx_fifo_avail--;
@@ -344,6 +345,7 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
344 if (tx_fifo_avail > 0 && buf_remaining > 0) { 345 if (tx_fifo_avail > 0 && buf_remaining > 0) {
345 BUG_ON(buf_remaining > 3); 346 BUG_ON(buf_remaining > 3);
346 memcpy(&val, buf, buf_remaining); 347 memcpy(&val, buf, buf_remaining);
348 val = le32_to_cpu(val);
347 349
348 /* Again update before writing to FIFO to make sure isr sees. */ 350 /* Again update before writing to FIFO to make sure isr sees. */
349 i2c_dev->msg_buf_remaining = 0; 351 i2c_dev->msg_buf_remaining = 0;
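The two Tegra hunks make the partial-word FIFO copies endian-safe: the FIFO register packs message bytes little-endian, so the value is converted with cpu_to_le32()/le32_to_cpu() around the memcpy() of the 1-3 trailing bytes. A self-contained userspace sketch of the failure mode, with to_le32() standing in for the kernel's cpu_to_le32():

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Stand-in for cpu_to_le32(): a no-op on little-endian hosts,
	 * a byte swap on big-endian ones. */
	static uint32_t to_le32(uint32_t v)
	{
		uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff,
				 (v >> 16) & 0xff, (v >> 24) & 0xff };
		uint32_t out;

		memcpy(&out, b, sizeof(out));
		return out;
	}

	int main(void)
	{
		uint32_t fifo = 0x00c0ffee;	/* three valid bytes: ee ff c0 */
		uint8_t buf[3];

		/* Without the conversion, a big-endian host would copy the
		 * high-order (invalid) bytes instead of the low three. */
		fifo = to_le32(fifo);
		memcpy(buf, &fifo, sizeof(buf));
		printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]);
		return 0;
	}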
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index e9eae57a2b50..210cf4874cb7 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -102,7 +102,7 @@ static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data)
102 struct acpi_resource_i2c_serialbus *sb; 102 struct acpi_resource_i2c_serialbus *sb;
103 103
104 sb = &ares->data.i2c_serial_bus; 104 sb = &ares->data.i2c_serial_bus;
105 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) { 105 if (!info->addr && sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
106 info->addr = sb->slave_address; 106 info->addr = sb->slave_address;
107 if (sb->access_mode == ACPI_I2C_10BIT_MODE) 107 if (sb->access_mode == ACPI_I2C_10BIT_MODE)
108 info->flags |= I2C_CLIENT_TEN; 108 info->flags |= I2C_CLIENT_TEN;
@@ -698,101 +698,6 @@ static void i2c_device_shutdown(struct device *dev)
698 driver->shutdown(client); 698 driver->shutdown(client);
699} 699}
700 700
701#ifdef CONFIG_PM_SLEEP
702static int i2c_legacy_suspend(struct device *dev, pm_message_t mesg)
703{
704 struct i2c_client *client = i2c_verify_client(dev);
705 struct i2c_driver *driver;
706
707 if (!client || !dev->driver)
708 return 0;
709 driver = to_i2c_driver(dev->driver);
710 if (!driver->suspend)
711 return 0;
712 return driver->suspend(client, mesg);
713}
714
715static int i2c_legacy_resume(struct device *dev)
716{
717 struct i2c_client *client = i2c_verify_client(dev);
718 struct i2c_driver *driver;
719
720 if (!client || !dev->driver)
721 return 0;
722 driver = to_i2c_driver(dev->driver);
723 if (!driver->resume)
724 return 0;
725 return driver->resume(client);
726}
727
728static int i2c_device_pm_suspend(struct device *dev)
729{
730 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
731
732 if (pm)
733 return pm_generic_suspend(dev);
734 else
735 return i2c_legacy_suspend(dev, PMSG_SUSPEND);
736}
737
738static int i2c_device_pm_resume(struct device *dev)
739{
740 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
741
742 if (pm)
743 return pm_generic_resume(dev);
744 else
745 return i2c_legacy_resume(dev);
746}
747
748static int i2c_device_pm_freeze(struct device *dev)
749{
750 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
751
752 if (pm)
753 return pm_generic_freeze(dev);
754 else
755 return i2c_legacy_suspend(dev, PMSG_FREEZE);
756}
757
758static int i2c_device_pm_thaw(struct device *dev)
759{
760 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
761
762 if (pm)
763 return pm_generic_thaw(dev);
764 else
765 return i2c_legacy_resume(dev);
766}
767
768static int i2c_device_pm_poweroff(struct device *dev)
769{
770 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
771
772 if (pm)
773 return pm_generic_poweroff(dev);
774 else
775 return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
776}
777
778static int i2c_device_pm_restore(struct device *dev)
779{
780 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
781
782 if (pm)
783 return pm_generic_restore(dev);
784 else
785 return i2c_legacy_resume(dev);
786}
787#else /* !CONFIG_PM_SLEEP */
788#define i2c_device_pm_suspend NULL
789#define i2c_device_pm_resume NULL
790#define i2c_device_pm_freeze NULL
791#define i2c_device_pm_thaw NULL
792#define i2c_device_pm_poweroff NULL
793#define i2c_device_pm_restore NULL
794#endif /* !CONFIG_PM_SLEEP */
795
796static void i2c_client_dev_release(struct device *dev) 701static void i2c_client_dev_release(struct device *dev)
797{ 702{
798 kfree(to_i2c_client(dev)); 703 kfree(to_i2c_client(dev));
@@ -804,6 +709,7 @@ show_name(struct device *dev, struct device_attribute *attr, char *buf)
804 return sprintf(buf, "%s\n", dev->type == &i2c_client_type ? 709 return sprintf(buf, "%s\n", dev->type == &i2c_client_type ?
805 to_i2c_client(dev)->name : to_i2c_adapter(dev)->name); 710 to_i2c_client(dev)->name : to_i2c_adapter(dev)->name);
806} 711}
712static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
807 713
808static ssize_t 714static ssize_t
809show_modalias(struct device *dev, struct device_attribute *attr, char *buf) 715show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
@@ -817,8 +723,6 @@ show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
817 723
818 return sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name); 724 return sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name);
819} 725}
820
821static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
822static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL); 726static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL);
823 727
824static struct attribute *i2c_dev_attrs[] = { 728static struct attribute *i2c_dev_attrs[] = {
@@ -827,29 +731,7 @@ static struct attribute *i2c_dev_attrs[] = {
827 &dev_attr_modalias.attr, 731 &dev_attr_modalias.attr,
828 NULL 732 NULL
829}; 733};
830 734ATTRIBUTE_GROUPS(i2c_dev);
831static struct attribute_group i2c_dev_attr_group = {
832 .attrs = i2c_dev_attrs,
833};
834
835static const struct attribute_group *i2c_dev_attr_groups[] = {
836 &i2c_dev_attr_group,
837 NULL
838};
839
840static const struct dev_pm_ops i2c_device_pm_ops = {
841 .suspend = i2c_device_pm_suspend,
842 .resume = i2c_device_pm_resume,
843 .freeze = i2c_device_pm_freeze,
844 .thaw = i2c_device_pm_thaw,
845 .poweroff = i2c_device_pm_poweroff,
846 .restore = i2c_device_pm_restore,
847 SET_RUNTIME_PM_OPS(
848 pm_generic_runtime_suspend,
849 pm_generic_runtime_resume,
850 NULL
851 )
852};
853 735
854struct bus_type i2c_bus_type = { 736struct bus_type i2c_bus_type = {
855 .name = "i2c", 737 .name = "i2c",
@@ -857,12 +739,11 @@ struct bus_type i2c_bus_type = {
857 .probe = i2c_device_probe, 739 .probe = i2c_device_probe,
858 .remove = i2c_device_remove, 740 .remove = i2c_device_remove,
859 .shutdown = i2c_device_shutdown, 741 .shutdown = i2c_device_shutdown,
860 .pm = &i2c_device_pm_ops,
861}; 742};
862EXPORT_SYMBOL_GPL(i2c_bus_type); 743EXPORT_SYMBOL_GPL(i2c_bus_type);
863 744
864static struct device_type i2c_client_type = { 745static struct device_type i2c_client_type = {
865 .groups = i2c_dev_attr_groups, 746 .groups = i2c_dev_groups,
866 .uevent = i2c_device_uevent, 747 .uevent = i2c_device_uevent,
867 .release = i2c_client_dev_release, 748 .release = i2c_client_dev_release,
868}; 749};
@@ -1261,6 +1142,7 @@ i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr,
1261 1142
1262 return count; 1143 return count;
1263} 1144}
1145static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
1264 1146
1265/* 1147/*
1266 * And of course let the users delete the devices they instantiated, if 1148 * And of course let the users delete the devices they instantiated, if
@@ -1315,8 +1197,6 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr,
1315 "delete_device"); 1197 "delete_device");
1316 return res; 1198 return res;
1317} 1199}
1318
1319static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
1320static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL, 1200static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL,
1321 i2c_sysfs_delete_device); 1201 i2c_sysfs_delete_device);
1322 1202
@@ -1326,18 +1206,10 @@ static struct attribute *i2c_adapter_attrs[] = {
1326 &dev_attr_delete_device.attr, 1206 &dev_attr_delete_device.attr,
1327 NULL 1207 NULL
1328}; 1208};
1329 1209ATTRIBUTE_GROUPS(i2c_adapter);
1330static struct attribute_group i2c_adapter_attr_group = {
1331 .attrs = i2c_adapter_attrs,
1332};
1333
1334static const struct attribute_group *i2c_adapter_attr_groups[] = {
1335 &i2c_adapter_attr_group,
1336 NULL
1337};
1338 1210
1339struct device_type i2c_adapter_type = { 1211struct device_type i2c_adapter_type = {
1340 .groups = i2c_adapter_attr_groups, 1212 .groups = i2c_adapter_groups,
1341 .release = i2c_adapter_dev_release, 1213 .release = i2c_adapter_dev_release,
1342}; 1214};
1343EXPORT_SYMBOL_GPL(i2c_adapter_type); 1215EXPORT_SYMBOL_GPL(i2c_adapter_type);
@@ -1419,8 +1291,6 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
1419 if (of_get_property(node, "wakeup-source", NULL)) 1291 if (of_get_property(node, "wakeup-source", NULL))
1420 info.flags |= I2C_CLIENT_WAKE; 1292 info.flags |= I2C_CLIENT_WAKE;
1421 1293
1422 request_module("%s%s", I2C_MODULE_PREFIX, info.type);
1423
1424 result = i2c_new_device(adap, &info); 1294 result = i2c_new_device(adap, &info);
1425 if (result == NULL) { 1295 if (result == NULL) {
1426 dev_err(&adap->dev, "of_i2c: Failure registering %s\n", 1296 dev_err(&adap->dev, "of_i2c: Failure registering %s\n",
@@ -1796,11 +1666,15 @@ void i2c_del_adapter(struct i2c_adapter *adap)
1796 /* device name is gone after device_unregister */ 1666 /* device name is gone after device_unregister */
1797 dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name); 1667 dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
1798 1668
1799 /* clean up the sysfs representation */ 1669 /* wait until all references to the device are gone
1670 *
1671 * FIXME: This is old code and should ideally be replaced by an
1672 * alternative which results in decoupling the lifetime of the struct
1673 * device from the i2c_adapter, like spi or netdev do. Any solution
1674 * should be thoroughly tested with DEBUG_KOBJECT_RELEASE enabled!
1675 */
1800 init_completion(&adap->dev_released); 1676 init_completion(&adap->dev_released);
1801 device_unregister(&adap->dev); 1677 device_unregister(&adap->dev);
1802
1803 /* wait for sysfs to drop all references */
1804 wait_for_completion(&adap->dev_released); 1678 wait_for_completion(&adap->dev_released);
1805 1679
1806 /* free bus id */ 1680 /* free bus id */
@@ -1859,14 +1733,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
1859 if (res) 1733 if (res)
1860 return res; 1734 return res;
1861 1735
1862 /* Drivers should switch to dev_pm_ops instead. */
1863 if (driver->suspend)
1864 pr_warn("i2c-core: driver [%s] using legacy suspend method\n",
1865 driver->driver.name);
1866 if (driver->resume)
1867 pr_warn("i2c-core: driver [%s] using legacy resume method\n",
1868 driver->driver.name);
1869
1870 pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name); 1736 pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
1871 1737
1872 INIT_LIST_HEAD(&driver->clients); 1738 INIT_LIST_HEAD(&driver->clients);
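With the legacy i2c_driver->suspend/->resume hooks and the bus-level dev_pm_ops removed, the driver core now invokes a client driver's own dev_pm_ops directly. A minimal sketch of the modern form, assuming a hypothetical mychip driver whose register 0x00 is a power-control register:

	#include <linux/i2c.h>
	#include <linux/pm.h>

	static int mychip_suspend(struct device *dev)
	{
		struct i2c_client *client = to_i2c_client(dev);

		/* put the chip into its low-power state */
		return i2c_smbus_write_byte_data(client, 0x00, 0x01);
	}

	static int mychip_resume(struct device *dev)
	{
		struct i2c_client *client = to_i2c_client(dev);

		return i2c_smbus_write_byte_data(client, 0x00, 0x00);
	}

	static SIMPLE_DEV_PM_OPS(mychip_pm_ops, mychip_suspend, mychip_resume);

	static struct i2c_driver mychip_driver = {
		.driver = {
			.name	= "mychip",
			.pm	= &mychip_pm_ops,	/* called by the PM core */
		},
	};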
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index ec11b404b433..3d8f4fe2e47e 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -41,6 +41,7 @@
41#include <linux/i2c-mux.h> 41#include <linux/i2c-mux.h>
42#include <linux/i2c/pca954x.h> 42#include <linux/i2c/pca954x.h>
43#include <linux/module.h> 43#include <linux/module.h>
44#include <linux/of.h>
44#include <linux/pm.h> 45#include <linux/pm.h>
45#include <linux/slab.h> 46#include <linux/slab.h>
46 47
@@ -186,6 +187,8 @@ static int pca954x_probe(struct i2c_client *client,
186{ 187{
187 struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent); 188 struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
188 struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev); 189 struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
190 struct device_node *of_node = client->dev.of_node;
191 bool idle_disconnect_dt;
189 struct gpio_desc *gpio; 192 struct gpio_desc *gpio;
190 int num, force, class; 193 int num, force, class;
191 struct pca954x *data; 194 struct pca954x *data;
@@ -217,8 +220,13 @@ static int pca954x_probe(struct i2c_client *client,
217 data->type = id->driver_data; 220 data->type = id->driver_data;
218 data->last_chan = 0; /* force the first selection */ 221 data->last_chan = 0; /* force the first selection */
219 222
223 idle_disconnect_dt = of_node &&
224 of_property_read_bool(of_node, "i2c-mux-idle-disconnect");
225
220 /* Now create an adapter for each channel */ 226 /* Now create an adapter for each channel */
221 for (num = 0; num < chips[data->type].nchans; num++) { 227 for (num = 0; num < chips[data->type].nchans; num++) {
228 bool idle_disconnect_pd = false;
229
222 force = 0; /* dynamic adap number */ 230 force = 0; /* dynamic adap number */
223 class = 0; /* no class by default */ 231 class = 0; /* no class by default */
224 if (pdata) { 232 if (pdata) {
@@ -229,12 +237,13 @@ static int pca954x_probe(struct i2c_client *client,
229 } else 237 } else
230 /* discard unconfigured channels */ 238 /* discard unconfigured channels */
231 break; 239 break;
240 idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
232 } 241 }
233 242
234 data->virt_adaps[num] = 243 data->virt_adaps[num] =
235 i2c_add_mux_adapter(adap, &client->dev, client, 244 i2c_add_mux_adapter(adap, &client->dev, client,
236 force, num, class, pca954x_select_chan, 245 force, num, class, pca954x_select_chan,
237 (pdata && pdata->modes[num].deselect_on_exit) 246 (idle_disconnect_pd || idle_disconnect_dt)
238 ? pca954x_deselect_mux : NULL); 247 ? pca954x_deselect_mux : NULL);
239 248
240 if (data->virt_adaps[num] == NULL) { 249 if (data->virt_adaps[num] == NULL) {
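The pca954x probe change accepts the idle-disconnect request from either source: the "i2c-mux-idle-disconnect" device-tree flag applies to the whole mux, while platform data can request it per channel. Condensed into an illustrative helper (struct pca954x_platform_mode comes from <linux/i2c/pca954x.h>):

	#include <linux/of.h>
	#include <linux/i2c/pca954x.h>

	/* Illustrative condensation of the decision made in probe(). */
	static bool want_idle_disconnect(struct device_node *of_node,
					 struct pca954x_platform_mode *mode)
	{
		bool from_dt = of_node &&
			of_property_read_bool(of_node, "i2c-mux-idle-disconnect");
		bool from_pdata = mode ? mode->deselect_on_exit : false;

		/* either source installs the pca954x_deselect_mux hook */
		return from_dt || from_pdata;
	}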
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 4132935dc929..4011effe4c05 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -21,7 +21,7 @@ config IIO_BUFFER
21if IIO_BUFFER 21if IIO_BUFFER
22 22
23config IIO_BUFFER_CB 23config IIO_BUFFER_CB
24boolean "IIO callback buffer used for push in-kernel interfaces" 24 bool "IIO callback buffer used for push in-kernel interfaces"
25 help 25 help
26 Should be selected by any drivers that do in-kernel push 26 Should be selected by any drivers that do in-kernel push
27 usage. That is, those where the data is pushed to the consumer. 27 usage. That is, those where the data is pushed to the consumer.
@@ -43,7 +43,7 @@ config IIO_TRIGGERED_BUFFER
43endif # IIO_BUFFER 43endif # IIO_BUFFER
44 44
45config IIO_TRIGGER 45config IIO_TRIGGER
46 boolean "Enable triggered sampling support" 46 bool "Enable triggered sampling support"
47 help 47 help
48 Provides IIO core support for triggers. Currently these 48 Provides IIO core support for triggers. Currently these
49 are used to initialize capture of samples to push into 49 are used to initialize capture of samples to push into
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 56a4b7ca7ee3..45d67e9228d7 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1124,6 +1124,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
1124 if (!optlen) 1124 if (!optlen)
1125 return -EINVAL; 1125 return -EINVAL;
1126 1126
1127 memset(&sa_path, 0, sizeof(sa_path));
1128 sa_path.vlan_id = 0xffff;
1129
1127 ib_sa_unpack_path(path_data->path_rec, &sa_path); 1130 ib_sa_unpack_path(path_data->path_rec, &sa_path);
1128 ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1); 1131 ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
1129 if (ret) 1132 if (ret)
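The ucma fix is an instance of a general rule for partially-unpacked structures: ib_sa_unpack_path() fills only the fields present in the wire format, so members it never touches (here vlan_id) must be initialized first or they keep kernel stack garbage. Condensed, assuming the struct ib_sa_path_rec type used by this function:

	struct ib_sa_path_rec sa_path;

	memset(&sa_path, 0, sizeof(sa_path));	/* no uninitialized fields */
	sa_path.vlan_id = 0xffff;		/* sentinel: no VLAN tag */
	ib_sa_unpack_path(path_data->path_rec, &sa_path);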
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 6095872549e7..8b8cc6fa0ab0 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -294,7 +294,8 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
294 if (likely(ib_umem_start(umem) != ib_umem_end(umem))) 294 if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
295 rbt_ib_umem_insert(&umem->odp_data->interval_tree, 295 rbt_ib_umem_insert(&umem->odp_data->interval_tree,
296 &context->umem_tree); 296 &context->umem_tree);
297 if (likely(!atomic_read(&context->notifier_count))) 297 if (likely(!atomic_read(&context->notifier_count)) ||
298 context->odp_mrs_count == 1)
298 umem->odp_data->mn_counters_active = true; 299 umem->odp_data->mn_counters_active = true;
299 else 300 else
300 list_add(&umem->odp_data->no_private_counters, 301 list_add(&umem->odp_data->no_private_counters,
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 643c08a025a5..b716b0815644 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -258,5 +258,6 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
258 258
259IB_UVERBS_DECLARE_EX_CMD(create_flow); 259IB_UVERBS_DECLARE_EX_CMD(create_flow);
260IB_UVERBS_DECLARE_EX_CMD(destroy_flow); 260IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
261IB_UVERBS_DECLARE_EX_CMD(query_device);
261 262
262#endif /* UVERBS_H */ 263#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index b7943ff16ed3..a9f048990dfc 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -400,6 +400,52 @@ err:
400 return ret; 400 return ret;
401} 401}
402 402
403static void copy_query_dev_fields(struct ib_uverbs_file *file,
404 struct ib_uverbs_query_device_resp *resp,
405 struct ib_device_attr *attr)
406{
407 resp->fw_ver = attr->fw_ver;
408 resp->node_guid = file->device->ib_dev->node_guid;
409 resp->sys_image_guid = attr->sys_image_guid;
410 resp->max_mr_size = attr->max_mr_size;
411 resp->page_size_cap = attr->page_size_cap;
412 resp->vendor_id = attr->vendor_id;
413 resp->vendor_part_id = attr->vendor_part_id;
414 resp->hw_ver = attr->hw_ver;
415 resp->max_qp = attr->max_qp;
416 resp->max_qp_wr = attr->max_qp_wr;
417 resp->device_cap_flags = attr->device_cap_flags;
418 resp->max_sge = attr->max_sge;
419 resp->max_sge_rd = attr->max_sge_rd;
420 resp->max_cq = attr->max_cq;
421 resp->max_cqe = attr->max_cqe;
422 resp->max_mr = attr->max_mr;
423 resp->max_pd = attr->max_pd;
424 resp->max_qp_rd_atom = attr->max_qp_rd_atom;
425 resp->max_ee_rd_atom = attr->max_ee_rd_atom;
426 resp->max_res_rd_atom = attr->max_res_rd_atom;
427 resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
428 resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
429 resp->atomic_cap = attr->atomic_cap;
430 resp->max_ee = attr->max_ee;
431 resp->max_rdd = attr->max_rdd;
432 resp->max_mw = attr->max_mw;
433 resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
434 resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
435 resp->max_mcast_grp = attr->max_mcast_grp;
436 resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
437 resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
438 resp->max_ah = attr->max_ah;
439 resp->max_fmr = attr->max_fmr;
440 resp->max_map_per_fmr = attr->max_map_per_fmr;
441 resp->max_srq = attr->max_srq;
442 resp->max_srq_wr = attr->max_srq_wr;
443 resp->max_srq_sge = attr->max_srq_sge;
444 resp->max_pkeys = attr->max_pkeys;
445 resp->local_ca_ack_delay = attr->local_ca_ack_delay;
446 resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt;
447}
448
403ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, 449ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
404 const char __user *buf, 450 const char __user *buf,
405 int in_len, int out_len) 451 int in_len, int out_len)
@@ -420,47 +466,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
420 return ret; 466 return ret;
421 467
422 memset(&resp, 0, sizeof resp); 468 memset(&resp, 0, sizeof resp);
423 469 copy_query_dev_fields(file, &resp, &attr);
424 resp.fw_ver = attr.fw_ver;
425 resp.node_guid = file->device->ib_dev->node_guid;
426 resp.sys_image_guid = attr.sys_image_guid;
427 resp.max_mr_size = attr.max_mr_size;
428 resp.page_size_cap = attr.page_size_cap;
429 resp.vendor_id = attr.vendor_id;
430 resp.vendor_part_id = attr.vendor_part_id;
431 resp.hw_ver = attr.hw_ver;
432 resp.max_qp = attr.max_qp;
433 resp.max_qp_wr = attr.max_qp_wr;
434 resp.device_cap_flags = attr.device_cap_flags;
435 resp.max_sge = attr.max_sge;
436 resp.max_sge_rd = attr.max_sge_rd;
437 resp.max_cq = attr.max_cq;
438 resp.max_cqe = attr.max_cqe;
439 resp.max_mr = attr.max_mr;
440 resp.max_pd = attr.max_pd;
441 resp.max_qp_rd_atom = attr.max_qp_rd_atom;
442 resp.max_ee_rd_atom = attr.max_ee_rd_atom;
443 resp.max_res_rd_atom = attr.max_res_rd_atom;
444 resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
445 resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
446 resp.atomic_cap = attr.atomic_cap;
447 resp.max_ee = attr.max_ee;
448 resp.max_rdd = attr.max_rdd;
449 resp.max_mw = attr.max_mw;
450 resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
451 resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
452 resp.max_mcast_grp = attr.max_mcast_grp;
453 resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
454 resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
455 resp.max_ah = attr.max_ah;
456 resp.max_fmr = attr.max_fmr;
457 resp.max_map_per_fmr = attr.max_map_per_fmr;
458 resp.max_srq = attr.max_srq;
459 resp.max_srq_wr = attr.max_srq_wr;
460 resp.max_srq_sge = attr.max_srq_sge;
461 resp.max_pkeys = attr.max_pkeys;
462 resp.local_ca_ack_delay = attr.local_ca_ack_delay;
463 resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;
464 470
465 if (copy_to_user((void __user *) (unsigned long) cmd.response, 471 if (copy_to_user((void __user *) (unsigned long) cmd.response,
466 &resp, sizeof resp)) 472 &resp, sizeof resp))
@@ -2091,20 +2097,21 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
2091 if (qp->real_qp == qp) { 2097 if (qp->real_qp == qp) {
2092 ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask); 2098 ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
2093 if (ret) 2099 if (ret)
2094 goto out; 2100 goto release_qp;
2095 ret = qp->device->modify_qp(qp, attr, 2101 ret = qp->device->modify_qp(qp, attr,
2096 modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata); 2102 modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
2097 } else { 2103 } else {
2098 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask)); 2104 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
2099 } 2105 }
2100 2106
2101 put_qp_read(qp);
2102
2103 if (ret) 2107 if (ret)
2104 goto out; 2108 goto release_qp;
2105 2109
2106 ret = in_len; 2110 ret = in_len;
2107 2111
2112release_qp:
2113 put_qp_read(qp);
2114
2108out: 2115out:
2109 kfree(attr); 2116 kfree(attr);
2110 2117
@@ -3287,3 +3294,64 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
3287 3294
3288 return ret ? ret : in_len; 3295 return ret ? ret : in_len;
3289} 3296}
3297
3298int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
3299 struct ib_udata *ucore,
3300 struct ib_udata *uhw)
3301{
3302 struct ib_uverbs_ex_query_device_resp resp;
3303 struct ib_uverbs_ex_query_device cmd;
3304 struct ib_device_attr attr;
3305 struct ib_device *device;
3306 int err;
3307
3308 device = file->device->ib_dev;
3309 if (ucore->inlen < sizeof(cmd))
3310 return -EINVAL;
3311
3312 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3313 if (err)
3314 return err;
3315
3316 if (cmd.comp_mask)
3317 return -EINVAL;
3318
3319 if (cmd.reserved)
3320 return -EINVAL;
3321
3322 resp.response_length = offsetof(typeof(resp), odp_caps);
3323
3324 if (ucore->outlen < resp.response_length)
3325 return -ENOSPC;
3326
3327 err = device->query_device(device, &attr);
3328 if (err)
3329 return err;
3330
3331 copy_query_dev_fields(file, &resp.base, &attr);
3332 resp.comp_mask = 0;
3333
3334 if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
3335 goto end;
3336
3337#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
3338 resp.odp_caps.general_caps = attr.odp_caps.general_caps;
3339 resp.odp_caps.per_transport_caps.rc_odp_caps =
3340 attr.odp_caps.per_transport_caps.rc_odp_caps;
3341 resp.odp_caps.per_transport_caps.uc_odp_caps =
3342 attr.odp_caps.per_transport_caps.uc_odp_caps;
3343 resp.odp_caps.per_transport_caps.ud_odp_caps =
3344 attr.odp_caps.per_transport_caps.ud_odp_caps;
3345 resp.odp_caps.reserved = 0;
3346#else
3347 memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
3348#endif
3349 resp.response_length += sizeof(resp.odp_caps);
3350
3351end:
3352 err = ib_copy_to_udata(ucore, &resp, resp.response_length);
3353 if (err)
3354 return err;
3355
3356 return 0;
3357}
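ib_uverbs_ex_query_device() follows the extended-verbs convention: unknown comp_mask or reserved bits are rejected, optional trailing blocks are appended only when the caller's buffer can hold them, and response_length reports the bytes actually written so old and new user libraries interoperate. The skeleton of that pattern, with fill_odp_caps() as a hypothetical stand-in for the #ifdef block above:

	/* Sketch: error handling and the device query are elided. */
	resp.response_length = offsetof(typeof(resp), odp_caps);
	if (ucore->outlen < resp.response_length)
		return -ENOSPC;			/* too small even for the base */

	copy_query_dev_fields(file, &resp.base, &attr);
	resp.comp_mask = 0;

	if (ucore->outlen >= resp.response_length + sizeof(resp.odp_caps)) {
		fill_odp_caps(&resp);		/* optional trailing block */
		resp.response_length += sizeof(resp.odp_caps);
	}

	return ib_copy_to_udata(ucore, &resp, resp.response_length);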
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 5db1a8cc388d..259dcc7779f5 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -123,6 +123,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
123 struct ib_udata *uhw) = { 123 struct ib_udata *uhw) = {
124 [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow, 124 [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow,
125 [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow, 125 [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow,
126 [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device,
126}; 127};
127 128
128static void ib_uverbs_add_one(struct ib_device *device); 129static void ib_uverbs_add_one(struct ib_device *device);
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 794555dc86a5..bdfac2ccb704 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -225,13 +225,20 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
225 struct c4iw_cq *chp; 225 struct c4iw_cq *chp;
226 unsigned long flag; 226 unsigned long flag;
227 227
228 spin_lock_irqsave(&dev->lock, flag);
228 chp = get_chp(dev, qid); 229 chp = get_chp(dev, qid);
229 if (chp) { 230 if (chp) {
231 atomic_inc(&chp->refcnt);
232 spin_unlock_irqrestore(&dev->lock, flag);
230 t4_clear_cq_armed(&chp->cq); 233 t4_clear_cq_armed(&chp->cq);
231 spin_lock_irqsave(&chp->comp_handler_lock, flag); 234 spin_lock_irqsave(&chp->comp_handler_lock, flag);
232 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); 235 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
233 spin_unlock_irqrestore(&chp->comp_handler_lock, flag); 236 spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
234 } else 237 if (atomic_dec_and_test(&chp->refcnt))
238 wake_up(&chp->wait);
239 } else {
235 PDBG("%s unknown cqid 0x%x\n", __func__, qid); 240 PDBG("%s unknown cqid 0x%x\n", __func__, qid);
241 spin_unlock_irqrestore(&dev->lock, flag);
242 }
236 return 0; 243 return 0;
237} 244}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index b5678ac97393..d87e1650f643 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -196,7 +196,7 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
196 return (int)(rdev->lldi.vr->stag.size >> 5); 196 return (int)(rdev->lldi.vr->stag.size >> 5);
197} 197}
198 198
199#define C4IW_WR_TO (30*HZ) 199#define C4IW_WR_TO (60*HZ)
200 200
201struct c4iw_wr_wait { 201struct c4iw_wr_wait {
202 struct completion completion; 202 struct completion completion;
@@ -220,22 +220,21 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
220 u32 hwtid, u32 qpid, 220 u32 hwtid, u32 qpid,
221 const char *func) 221 const char *func)
222{ 222{
223 unsigned to = C4IW_WR_TO;
224 int ret; 223 int ret;
225 224
226 do { 225 if (c4iw_fatal_error(rdev)) {
227 ret = wait_for_completion_timeout(&wr_waitp->completion, to); 226 wr_waitp->ret = -EIO;
228 if (!ret) { 227 goto out;
229 printk(KERN_ERR MOD "%s - Device %s not responding - " 228 }
230 "tid %u qpid %u\n", func, 229
231 pci_name(rdev->lldi.pdev), hwtid, qpid); 230 ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
232 if (c4iw_fatal_error(rdev)) { 231 if (!ret) {
233 wr_waitp->ret = -EIO; 232 PDBG("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
234 break; 233 func, pci_name(rdev->lldi.pdev), hwtid, qpid);
235 } 234 rdev->flags |= T4_FATAL_ERROR;
236 to = to << 2; 235 wr_waitp->ret = -EIO;
237 } 236 }
238 } while (!ret); 237out:
239 if (wr_waitp->ret) 238 if (wr_waitp->ret)
240 PDBG("%s: FW reply %d tid %u qpid %u\n", 239 PDBG("%s: FW reply %d tid %u qpid %u\n",
241 pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid); 240 pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
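The iw_cxgb4 rework replaces the escalating-timeout retry loop with a single 60-second wait and makes a timeout sticky: the device is flagged fatal, so later waiters fail immediately via the c4iw_fatal_error() check instead of hanging on dead hardware. The resulting control flow, condensed:

	/* Condensed control flow of the new c4iw_wait_for_reply(). */
	if (c4iw_fatal_error(rdev))
		return -EIO;			/* already dead: fail fast */

	if (!wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO)) {
		rdev->flags |= T4_FATAL_ERROR;	/* disable further use */
		return -EIO;
	}
	return wr_waitp->ret;			/* status reported by firmware */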
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 4977082e081f..33c45dfcbd88 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -277,7 +277,7 @@ static int remove_file(struct dentry *parent, char *name)
277 } 277 }
278 278
279 spin_lock(&tmp->d_lock); 279 spin_lock(&tmp->d_lock);
280 if (!(d_unhashed(tmp) && tmp->d_inode)) { 280 if (!d_unhashed(tmp) && tmp->d_inode) {
281 dget_dlock(tmp); 281 dget_dlock(tmp);
282 __d_drop(tmp); 282 __d_drop(tmp);
283 spin_unlock(&tmp->d_lock); 283 spin_unlock(&tmp->d_lock);
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 6559af60bffd..e08db7020cd4 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -908,9 +908,6 @@ void ipath_chip_cleanup(struct ipath_devdata *);
908/* clean up any chip type-specific stuff */ 908/* clean up any chip type-specific stuff */
909void ipath_chip_done(void); 909void ipath_chip_done(void);
910 910
911/* check to see if we have to force ordering for write combining */
912int ipath_unordered_wc(void);
913
914void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first, 911void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
915 unsigned cnt); 912 unsigned cnt);
916void ipath_cancel_sends(struct ipath_devdata *, int); 913void ipath_cancel_sends(struct ipath_devdata *, int);
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
index 1d7bd82a1fb1..1a7e20a75149 100644
--- a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
+++ b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
@@ -47,16 +47,3 @@ int ipath_enable_wc(struct ipath_devdata *dd)
47{ 47{
48 return 0; 48 return 0;
49} 49}
50
51/**
52 * ipath_unordered_wc - indicate whether write combining is unordered
53 *
54 * Because our performance depends on our ability to do write
55 * combining mmio writes in the most efficient way, we need to
56 * know if we are on a processor that may reorder stores when
57 * write combining.
58 */
59int ipath_unordered_wc(void)
60{
61 return 1;
62}
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
index 3428acb0868c..4ad0b932df1f 100644
--- a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
+++ b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
@@ -167,18 +167,3 @@ void ipath_disable_wc(struct ipath_devdata *dd)
167 dd->ipath_wc_cookie = 0; /* even on failure */ 167 dd->ipath_wc_cookie = 0; /* even on failure */
168 } 168 }
169} 169}
170
171/**
172 * ipath_unordered_wc - indicate whether write combining is ordered
173 *
174 * Because our performance depends on our ability to do write combining mmio
175 * writes in the most efficient way, we need to know if we are on an Intel
176 * or AMD x86_64 processor. AMD x86_64 processors flush WC buffers out in
177 * the order completed, and so no special flushing is required to get
178 * correct ordering. Intel processors, however, will flush write buffers
179 * out in "random" orders, and so explicit ordering is needed at times.
180 */
181int ipath_unordered_wc(void)
182{
183 return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
184}
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 56a593e0ae5d..39a488889fc7 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -372,7 +372,7 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
372 *slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id); 372 *slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
373 if (*slave < 0) { 373 if (*slave < 0) {
374 mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n", 374 mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
375 gid.global.interface_id); 375 be64_to_cpu(gid.global.interface_id));
376 return -ENOENT; 376 return -ENOENT;
377 } 377 }
378 return 0; 378 return 0;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 543ecdd8667b..0176caa5792c 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -369,8 +369,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
369 int err; 369 int err;
370 370
371 mutex_lock(&cq->resize_mutex); 371 mutex_lock(&cq->resize_mutex);
372 372 if (entries < 1 || entries > dev->dev->caps.max_cqes) {
373 if (entries < 1) {
374 err = -EINVAL; 373 err = -EINVAL;
375 goto out; 374 goto out;
376 } 375 }
@@ -381,7 +380,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
381 goto out; 380 goto out;
382 } 381 }
383 382
384 if (entries > dev->dev->caps.max_cqes) { 383 if (entries > dev->dev->caps.max_cqes + 1) {
385 err = -EINVAL; 384 err = -EINVAL;
386 goto out; 385 goto out;
387 } 386 }
@@ -394,7 +393,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
394 /* Can't be smaller than the number of outstanding CQEs */ 393 /* Can't be smaller than the number of outstanding CQEs */
395 outst_cqe = mlx4_ib_get_outstanding_cqes(cq); 394 outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
396 if (entries < outst_cqe + 1) { 395 if (entries < outst_cqe + 1) {
397 err = 0; 396 err = -EINVAL;
398 goto out; 397 goto out;
399 } 398 }
400 399
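The mlx4 resize fix tightens the validation: the range check moves before any work, the upper bound allows for the extra CQE the hardware reserves, and a size too small for the CQEs still outstanding now returns -EINVAL instead of silently succeeding with 0. In outline (the power-of-two rounding between the checks is unchanged context not shown in the hunks):

	/* Outline of the checks after the fix. */
	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return -EINVAL;			/* outside the supported range */

	entries = roundup_pow_of_two(entries + 1);
	if (entries > dev->dev->caps.max_cqes + 1)
		return -EINVAL;			/* rounded size still too big */

	if (entries < mlx4_ib_get_outstanding_cqes(cq) + 1)
		return -EINVAL;			/* would drop unpolled CQEs */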
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index eb8e215f1613..ac6e2b710ea6 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1269,8 +1269,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1269 struct mlx4_dev *dev = mdev->dev; 1269 struct mlx4_dev *dev = mdev->dev;
1270 struct mlx4_ib_qp *mqp = to_mqp(ibqp); 1270 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1271 struct mlx4_ib_steering *ib_steering = NULL; 1271 struct mlx4_ib_steering *ib_steering = NULL;
1272 enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ? 1272 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1273 MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
1274 struct mlx4_flow_reg_id reg_id; 1273 struct mlx4_flow_reg_id reg_id;
1275 1274
1276 if (mdev->dev->caps.steering_mode == 1275 if (mdev->dev->caps.steering_mode ==
@@ -1284,8 +1283,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1284 !!(mqp->flags & 1283 !!(mqp->flags &
1285 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), 1284 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1286 prot, &reg_id.id); 1285 prot, &reg_id.id);
1287 if (err) 1286 if (err) {
1287 pr_err("multicast attach op failed, err %d\n", err);
1288 goto err_malloc; 1288 goto err_malloc;
1289 }
1289 1290
1290 reg_id.mirror = 0; 1291 reg_id.mirror = 0;
1291 if (mlx4_is_bonded(dev)) { 1292 if (mlx4_is_bonded(dev)) {
@@ -1348,9 +1349,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1348 struct net_device *ndev; 1349 struct net_device *ndev;
1349 struct mlx4_ib_gid_entry *ge; 1350 struct mlx4_ib_gid_entry *ge;
1350 struct mlx4_flow_reg_id reg_id = {0, 0}; 1351 struct mlx4_flow_reg_id reg_id = {0, 0};
1351 1352 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1352 enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
1353 MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
1354 1353
1355 if (mdev->dev->caps.steering_mode == 1354 if (mdev->dev->caps.steering_mode ==
1356 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1355 MLX4_STEERING_MODE_DEVICE_MANAGED) {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index dfc6ca128a7e..ed2bd6701f9b 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1696,8 +1696,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1696 qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI || 1696 qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
1697 qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) { 1697 qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
1698 err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context); 1698 err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
1699 if (err) 1699 if (err) {
1700 return -EINVAL; 1700 err = -EINVAL;
1701 goto out;
1702 }
1701 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) 1703 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
1702 dev->qp1_proxy[qp->port - 1] = qp; 1704 dev->qp1_proxy[qp->port - 1] = qp;
1703 } 1705 }
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 03bf81211a54..cc4ac1e583b2 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -997,7 +997,7 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
997 struct ib_device_attr *dprops = NULL; 997 struct ib_device_attr *dprops = NULL;
998 struct ib_port_attr *pprops = NULL; 998 struct ib_port_attr *pprops = NULL;
999 struct mlx5_general_caps *gen; 999 struct mlx5_general_caps *gen;
1000 int err = 0; 1000 int err = -ENOMEM;
1001 int port; 1001 int port;
1002 1002
1003 gen = &dev->mdev->caps.gen; 1003 gen = &dev->mdev->caps.gen;
@@ -1331,6 +1331,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
1331 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | 1331 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
1332 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | 1332 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
1333 (1ull << IB_USER_VERBS_CMD_OPEN_QP); 1333 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
1334 dev->ib_dev.uverbs_ex_cmd_mask =
1335 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
1334 1336
1335 dev->ib_dev.query_device = mlx5_ib_query_device; 1337 dev->ib_dev.query_device = mlx5_ib_query_device;
1336 dev->ib_dev.query_port = mlx5_ib_query_port; 1338 dev->ib_dev.query_port = mlx5_ib_query_port;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 32a28bd50b20..cd9822eeacae 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1012,6 +1012,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
1012 goto err_2; 1012 goto err_2;
1013 } 1013 }
1014 mr->umem = umem; 1014 mr->umem = umem;
1015 mr->dev = dev;
1015 mr->live = 1; 1016 mr->live = 1;
1016 kvfree(in); 1017 kvfree(in);
1017 1018
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index b43456ae124b..c9780d919769 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -40,7 +40,7 @@
40#include <be_roce.h> 40#include <be_roce.h>
41#include "ocrdma_sli.h" 41#include "ocrdma_sli.h"
42 42
43#define OCRDMA_ROCE_DRV_VERSION "10.2.287.0u" 43#define OCRDMA_ROCE_DRV_VERSION "10.4.205.0u"
44 44
45#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver" 45#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
46#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" 46#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
@@ -55,12 +55,19 @@
55#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) 55#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
56 56
57#define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo) 57#define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)
58#define EQ_INTR_PER_SEC_THRSH_HI 150000
59#define EQ_INTR_PER_SEC_THRSH_LOW 100000
60#define EQ_AIC_MAX_EQD 20
61#define EQ_AIC_MIN_EQD 0
62
63void ocrdma_eqd_set_task(struct work_struct *work);
58 64
59struct ocrdma_dev_attr { 65struct ocrdma_dev_attr {
60 u8 fw_ver[32]; 66 u8 fw_ver[32];
61 u32 vendor_id; 67 u32 vendor_id;
62 u32 device_id; 68 u32 device_id;
63 u16 max_pd; 69 u16 max_pd;
70 u16 max_dpp_pds;
64 u16 max_cq; 71 u16 max_cq;
65 u16 max_cqe; 72 u16 max_cqe;
66 u16 max_qp; 73 u16 max_qp;
@@ -116,12 +123,19 @@ struct ocrdma_queue_info {
116 bool created; 123 bool created;
117}; 124};
118 125
126struct ocrdma_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
127 u32 prev_eqd;
128 u64 eq_intr_cnt;
129 u64 prev_eq_intr_cnt;
130};
131
119struct ocrdma_eq { 132struct ocrdma_eq {
120 struct ocrdma_queue_info q; 133 struct ocrdma_queue_info q;
121 u32 vector; 134 u32 vector;
122 int cq_cnt; 135 int cq_cnt;
123 struct ocrdma_dev *dev; 136 struct ocrdma_dev *dev;
124 char irq_name[32]; 137 char irq_name[32];
138 struct ocrdma_aic_obj aic_obj;
125}; 139};
126 140
127struct ocrdma_mq { 141struct ocrdma_mq {
@@ -171,6 +185,21 @@ struct ocrdma_stats {
171 struct ocrdma_dev *dev; 185 struct ocrdma_dev *dev;
172}; 186};
173 187
188struct ocrdma_pd_resource_mgr {
189 u32 pd_norm_start;
190 u16 pd_norm_count;
191 u16 pd_norm_thrsh;
192 u16 max_normal_pd;
193 u32 pd_dpp_start;
194 u16 pd_dpp_count;
195 u16 pd_dpp_thrsh;
196 u16 max_dpp_pd;
197 u16 dpp_page_index;
198 unsigned long *pd_norm_bitmap;
199 unsigned long *pd_dpp_bitmap;
200 bool pd_prealloc_valid;
201};
202
174struct stats_mem { 203struct stats_mem {
175 struct ocrdma_mqe mqe; 204 struct ocrdma_mqe mqe;
176 void *va; 205 void *va;
@@ -198,6 +227,7 @@ struct ocrdma_dev {
198 227
199 struct ocrdma_eq *eq_tbl; 228 struct ocrdma_eq *eq_tbl;
200 int eq_cnt; 229 int eq_cnt;
230 struct delayed_work eqd_work;
201 u16 base_eqid; 231 u16 base_eqid;
202 u16 max_eq; 232 u16 max_eq;
203 233
@@ -255,7 +285,12 @@ struct ocrdma_dev {
255 struct ocrdma_stats rx_qp_err_stats; 285 struct ocrdma_stats rx_qp_err_stats;
256 struct ocrdma_stats tx_dbg_stats; 286 struct ocrdma_stats tx_dbg_stats;
257 struct ocrdma_stats rx_dbg_stats; 287 struct ocrdma_stats rx_dbg_stats;
288 struct ocrdma_stats driver_stats;
289 struct ocrdma_stats reset_stats;
258 struct dentry *dir; 290 struct dentry *dir;
291 atomic_t async_err_stats[OCRDMA_MAX_ASYNC_ERRORS];
292 atomic_t cqe_err_stats[OCRDMA_MAX_CQE_ERR];
293 struct ocrdma_pd_resource_mgr *pd_mgr;
259}; 294};
260 295
261struct ocrdma_cq { 296struct ocrdma_cq {
@@ -335,7 +370,6 @@ struct ocrdma_srq {
335 370
336struct ocrdma_qp { 371struct ocrdma_qp {
337 struct ib_qp ibqp; 372 struct ib_qp ibqp;
338 struct ocrdma_dev *dev;
339 373
340 u8 __iomem *sq_db; 374 u8 __iomem *sq_db;
341 struct ocrdma_qp_hwq_info sq; 375 struct ocrdma_qp_hwq_info sq;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index f3cc8c9e65ae..d812904f3984 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -29,19 +29,22 @@
29#include <net/netevent.h> 29#include <net/netevent.h>
30 30
31#include <rdma/ib_addr.h> 31#include <rdma/ib_addr.h>
32#include <rdma/ib_mad.h>
32 33
33#include "ocrdma.h" 34#include "ocrdma.h"
34#include "ocrdma_verbs.h" 35#include "ocrdma_verbs.h"
35#include "ocrdma_ah.h" 36#include "ocrdma_ah.h"
36#include "ocrdma_hw.h" 37#include "ocrdma_hw.h"
38#include "ocrdma_stats.h"
37 39
38#define OCRDMA_VID_PCP_SHIFT 0xD 40#define OCRDMA_VID_PCP_SHIFT 0xD
39 41
40static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, 42static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
41 struct ib_ah_attr *attr, union ib_gid *sgid, int pdid) 43 struct ib_ah_attr *attr, union ib_gid *sgid,
44 int pdid, bool *isvlan)
42{ 45{
43 int status = 0; 46 int status = 0;
44 u16 vlan_tag; bool vlan_enabled = false; 47 u16 vlan_tag;
45 struct ocrdma_eth_vlan eth; 48 struct ocrdma_eth_vlan eth;
46 struct ocrdma_grh grh; 49 struct ocrdma_grh grh;
47 int eth_sz; 50 int eth_sz;
@@ -59,7 +62,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
59 vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT; 62 vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
60 eth.vlan_tag = cpu_to_be16(vlan_tag); 63 eth.vlan_tag = cpu_to_be16(vlan_tag);
61 eth_sz = sizeof(struct ocrdma_eth_vlan); 64 eth_sz = sizeof(struct ocrdma_eth_vlan);
62 vlan_enabled = true; 65 *isvlan = true;
63 } else { 66 } else {
64 eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); 67 eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
65 eth_sz = sizeof(struct ocrdma_eth_basic); 68 eth_sz = sizeof(struct ocrdma_eth_basic);
@@ -82,7 +85,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
82 /* Eth HDR */ 85 /* Eth HDR */
83 memcpy(&ah->av->eth_hdr, &eth, eth_sz); 86 memcpy(&ah->av->eth_hdr, &eth, eth_sz);
84 memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh)); 87 memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
85 if (vlan_enabled) 88 if (*isvlan)
86 ah->av->valid |= OCRDMA_AV_VLAN_VALID; 89 ah->av->valid |= OCRDMA_AV_VLAN_VALID;
87 ah->av->valid = cpu_to_le32(ah->av->valid); 90 ah->av->valid = cpu_to_le32(ah->av->valid);
88 return status; 91 return status;
@@ -91,6 +94,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
91struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) 94struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
92{ 95{
93 u32 *ahid_addr; 96 u32 *ahid_addr;
97 bool isvlan = false;
94 int status; 98 int status;
95 struct ocrdma_ah *ah; 99 struct ocrdma_ah *ah;
96 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); 100 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
@@ -127,15 +131,20 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
127 } 131 }
128 } 132 }
129 133
130 status = set_av_attr(dev, ah, attr, &sgid, pd->id); 134 status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan);
131 if (status) 135 if (status)
132 goto av_conf_err; 136 goto av_conf_err;
133 137
134 /* if pd is for the user process, pass the ah_id to user space */ 138 /* if pd is for the user process, pass the ah_id to user space */
135 if ((pd->uctx) && (pd->uctx->ah_tbl.va)) { 139 if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
136 ahid_addr = pd->uctx->ah_tbl.va + attr->dlid; 140 ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;
137 *ahid_addr = ah->id; 141 *ahid_addr = 0;
142 *ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
143 if (isvlan)
144 *ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK <<
145 OCRDMA_AH_VLAN_VALID_SHIFT);
138 } 146 }
147
139 return &ah->ibah; 148 return &ah->ibah;
140 149
141av_conf_err: 150av_conf_err:
@@ -191,5 +200,20 @@ int ocrdma_process_mad(struct ib_device *ibdev,
191 struct ib_grh *in_grh, 200 struct ib_grh *in_grh,
192 struct ib_mad *in_mad, struct ib_mad *out_mad) 201 struct ib_mad *in_mad, struct ib_mad *out_mad)
193{ 202{
194 return IB_MAD_RESULT_SUCCESS; 203 int status;
204 struct ocrdma_dev *dev;
205
206 switch (in_mad->mad_hdr.mgmt_class) {
207 case IB_MGMT_CLASS_PERF_MGMT:
208 dev = get_ocrdma_dev(ibdev);
209 if (!ocrdma_pma_counters(dev, out_mad))
210 status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
211 else
212 status = IB_MAD_RESULT_SUCCESS;
213 break;
214 default:
215 status = IB_MAD_RESULT_SUCCESS;
216 break;
217 }
218 return status;
195} 219}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 8ac49e7f96d1..726a87cf22dc 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -28,6 +28,12 @@
28#ifndef __OCRDMA_AH_H__ 28#ifndef __OCRDMA_AH_H__
29#define __OCRDMA_AH_H__ 29#define __OCRDMA_AH_H__
30 30
31enum {
32 OCRDMA_AH_ID_MASK = 0x3FF,
33 OCRDMA_AH_VLAN_VALID_MASK = 0x01,
34 OCRDMA_AH_VLAN_VALID_SHIFT = 0x1F
35};
36
31struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *); 37struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);
32int ocrdma_destroy_ah(struct ib_ah *); 38int ocrdma_destroy_ah(struct ib_ah *);
33int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *); 39int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);
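The new enum documents the layout of the AH-table entry that ocrdma_create_ah() now hands to user space: bits 9:0 carry the AH id and bit 31 flags a VLAN tag, letting the user library pick the right L2 header format. Packing and unpacking, sketched with illustrative helper names:

	/* Sketch of the ah_tbl entry layout defined above. */
	static u32 pack_ahid(u32 ah_id, bool isvlan)
	{
		u32 entry = ah_id & OCRDMA_AH_ID_MASK;		/* bits 9:0 */

		if (isvlan)
			entry |= OCRDMA_AH_VLAN_VALID_MASK <<
				 OCRDMA_AH_VLAN_VALID_SHIFT;	/* bit 31 */
		return entry;
	}

	static bool ahid_has_vlan(u32 entry)
	{
		return (entry >> OCRDMA_AH_VLAN_VALID_SHIFT) &
			OCRDMA_AH_VLAN_VALID_MASK;
	}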
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 638bff1ffc6c..0c9e95909a64 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -734,6 +734,9 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
734 break; 734 break;
735 } 735 }
736 736
737 if (type < OCRDMA_MAX_ASYNC_ERRORS)
738 atomic_inc(&dev->async_err_stats[type]);
739
737 if (qp_event) { 740 if (qp_event) {
738 if (qp->ibqp.event_handler) 741 if (qp->ibqp.event_handler)
739 qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context); 742 qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
@@ -831,20 +834,20 @@ static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
831 return 0; 834 return 0;
832} 835}
833 836
834static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev, 837static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
835 struct ocrdma_cq *cq) 838 struct ocrdma_cq *cq, bool sq)
836{ 839{
837 unsigned long flags;
838 struct ocrdma_qp *qp; 840 struct ocrdma_qp *qp;
839 bool buddy_cq_found = false; 841 struct list_head *cur;
840 /* Go through list of QPs in error state which are using this CQ 842 struct ocrdma_cq *bcq = NULL;
841 * and invoke its callback handler to trigger CQE processing for 843 struct list_head *head = sq?(&cq->sq_head):(&cq->rq_head);
842 * error/flushed CQE. It is rare to find more than a few entries in 844
843 * this list as most consumers stop after getting error CQE. 845 list_for_each(cur, head) {
844 * List is traversed only once when a matching buddy cq is found for a QP. 846 if (sq)
845 */ 847 qp = list_entry(cur, struct ocrdma_qp, sq_entry);
846 spin_lock_irqsave(&dev->flush_q_lock, flags); 848 else
847 list_for_each_entry(qp, &cq->sq_head, sq_entry) { 849 qp = list_entry(cur, struct ocrdma_qp, rq_entry);
850
848 if (qp->srq) 851 if (qp->srq)
849 continue; 852 /* if wq and rq share the same cq, then comp_handler
850 /* if wq and rq share the same cq, than comp_handler 853 /* if wq and rq share the same cq, than comp_handler
@@ -856,19 +859,41 @@ static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
856 * if completion came on rq, sq's cq is buddy cq. 859 * if completion came on rq, sq's cq is buddy cq.
857 */ 860 */
858 if (qp->sq_cq == cq) 861 if (qp->sq_cq == cq)
859 cq = qp->rq_cq; 862 bcq = qp->rq_cq;
860 else 863 else
861 cq = qp->sq_cq; 864 bcq = qp->sq_cq;
862 buddy_cq_found = true; 865 return bcq;
863 break;
864 } 866 }
867 return NULL;
868}
869
870static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
871 struct ocrdma_cq *cq)
872{
873 unsigned long flags;
874 struct ocrdma_cq *bcq = NULL;
875
876 /* Go through list of QPs in error state which are using this CQ
877 * and invoke its callback handler to trigger CQE processing for
878 * error/flushed CQE. It is rare to find more than a few entries in
879 * this list as most consumers stop after getting error CQE.
880 * List is traversed only once when a matching buddy cq is found for a QP.
881 */
882 spin_lock_irqsave(&dev->flush_q_lock, flags);
883 /* Check if buddy CQ is present.
884 * true - Check for SQ CQ
885 * false - Check for RQ CQ
886 */
887 bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true);
888 if (bcq == NULL)
889 bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false);
865 spin_unlock_irqrestore(&dev->flush_q_lock, flags); 890 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
866 if (buddy_cq_found == false) 891
867 return; 892 /* if there is valid buddy cq, look for its completion handler */
868 if (cq->ibcq.comp_handler) { 893 if (bcq && bcq->ibcq.comp_handler) {
869 spin_lock_irqsave(&cq->comp_handler_lock, flags); 894 spin_lock_irqsave(&bcq->comp_handler_lock, flags);
870 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); 895 (*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context);
871 spin_unlock_irqrestore(&cq->comp_handler_lock, flags); 896 spin_unlock_irqrestore(&bcq->comp_handler_lock, flags);
872 } 897 }
873} 898}
874 899
@@ -935,6 +960,7 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
935 960
936 } while (budget); 961 } while (budget);
937 962
963 eq->aic_obj.eq_intr_cnt++;
938 ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0); 964 ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
939 return IRQ_HANDLED; 965 return IRQ_HANDLED;
940} 966}
@@ -1050,6 +1076,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
1050 attr->max_pd = 1076 attr->max_pd =
1051 (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >> 1077 (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
1052 OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT; 1078 OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
1079 attr->max_dpp_pds =
1080 (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
1081 OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
1053 attr->max_qp = 1082 attr->max_qp =
1054 (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >> 1083 (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
1055 OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT; 1084 OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
@@ -1396,6 +1425,122 @@ int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
1396 return status; 1425 return status;
1397} 1426}
1398 1427
1428
1429static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
1430{
1431 int status = -ENOMEM;
1432 size_t pd_bitmap_size;
1433 struct ocrdma_alloc_pd_range *cmd;
1434 struct ocrdma_alloc_pd_range_rsp *rsp;
1435
1436 /* Pre allocate the DPP PDs */
1437 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
1438 if (!cmd)
1439 return -ENOMEM;
1440 cmd->pd_count = dev->attr.max_dpp_pds;
1441 cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
1442 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1443 if (status)
1444 goto mbx_err;
1445 rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
1446
1447 if ((rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && rsp->pd_count) {
1448 dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
1449 OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
1450 dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
1451 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
1452 dev->pd_mgr->max_dpp_pd = rsp->pd_count;
1453 pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
1454 dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
1455 GFP_KERNEL);
1456 }
1457 kfree(cmd);
1458
1459 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
1460 if (!cmd)
1461 return -ENOMEM;
1462
1463 cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
1464 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1465 if (status)
1466 goto mbx_err;
1467 rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
1468 if (rsp->pd_count) {
1469 dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
1470 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
1471 dev->pd_mgr->max_normal_pd = rsp->pd_count;
1472 pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
1473 dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
1474 GFP_KERNEL);
1475 }
1476
1477 if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
1478 /* Enable PD resource manager */
1479 dev->pd_mgr->pd_prealloc_valid = true;
1480 } else {
1481 status = -ENOMEM;
1482 }
1483mbx_err:
1484 kfree(cmd);
1485 return status;
1486}
1487
1488static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev)
1489{
1490 struct ocrdma_dealloc_pd_range *cmd;
1491
1492 /* return normal PDs to firmware */
1493 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd));
1494 if (!cmd)
1495 goto mbx_err;
1496
1497 if (dev->pd_mgr->max_normal_pd) {
1498 cmd->start_pd_id = dev->pd_mgr->pd_norm_start;
1499 cmd->pd_count = dev->pd_mgr->max_normal_pd;
1500 ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1501 }
1502
1503 if (dev->pd_mgr->max_dpp_pd) {
1504 kfree(cmd);
1505 /* return DPP PDs to firmware */
1506 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE,
1507 sizeof(*cmd));
1508 if (!cmd)
1509 goto mbx_err;
1510
1511 cmd->start_pd_id = dev->pd_mgr->pd_dpp_start;
1512 cmd->pd_count = dev->pd_mgr->max_dpp_pd;
1513 ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1514 }
1515mbx_err:
1516 kfree(cmd);
1517}
1518
1519void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
1520{
1521 int status;
1522
1523 dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
1524 GFP_KERNEL);
1525 if (!dev->pd_mgr) {
1526 pr_err("%s(%d) Memory allocation failure.\n", __func__, dev->id);
1527 return;
1528 }
1529 status = ocrdma_mbx_alloc_pd_range(dev);
1530 if (status) {
1531 pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
1532 __func__, dev->id);
1533 }
1534}
1535
1536static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
1537{
1538 ocrdma_mbx_dealloc_pd_range(dev);
1539 kfree(dev->pd_mgr->pd_norm_bitmap);
1540 kfree(dev->pd_mgr->pd_dpp_bitmap);
1541 kfree(dev->pd_mgr);
1542}
1543
1399static int ocrdma_build_q_conf(u32 *num_entries, int entry_size, 1544static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
1400 int *num_pages, int *page_size) 1545 int *num_pages, int *page_size)
1401{ 1546{
@@ -1896,8 +2041,9 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
1896{ 2041{
1897 bool found; 2042 bool found;
1898 unsigned long flags; 2043 unsigned long flags;
2044 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
1899 2045
1900 spin_lock_irqsave(&qp->dev->flush_q_lock, flags); 2046 spin_lock_irqsave(&dev->flush_q_lock, flags);
1901 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp); 2047 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1902 if (!found) 2048 if (!found)
1903 list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head); 2049 list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
@@ -1906,7 +2052,7 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
1906 if (!found) 2052 if (!found)
1907 list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head); 2053 list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
1908 } 2054 }
1909 spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags); 2055 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1910} 2056}
1911 2057
1912static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp) 2058static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
@@ -1972,7 +2118,8 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
1972 int status; 2118 int status;
1973 u32 len, hw_pages, hw_page_size; 2119 u32 len, hw_pages, hw_page_size;
1974 dma_addr_t pa; 2120 dma_addr_t pa;
1975 struct ocrdma_dev *dev = qp->dev; 2121 struct ocrdma_pd *pd = qp->pd;
2122 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
1976 struct pci_dev *pdev = dev->nic_info.pdev; 2123 struct pci_dev *pdev = dev->nic_info.pdev;
1977 u32 max_wqe_allocated; 2124 u32 max_wqe_allocated;
1978 u32 max_sges = attrs->cap.max_send_sge; 2125 u32 max_sges = attrs->cap.max_send_sge;
@@ -2027,7 +2174,8 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
2027 int status; 2174 int status;
2028 u32 len, hw_pages, hw_page_size; 2175 u32 len, hw_pages, hw_page_size;
2029 dma_addr_t pa = 0; 2176 dma_addr_t pa = 0;
2030 struct ocrdma_dev *dev = qp->dev; 2177 struct ocrdma_pd *pd = qp->pd;
2178 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
2031 struct pci_dev *pdev = dev->nic_info.pdev; 2179 struct pci_dev *pdev = dev->nic_info.pdev;
2032 u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1; 2180 u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
2033 2181
@@ -2086,7 +2234,8 @@ static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
2086static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd, 2234static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
2087 struct ocrdma_qp *qp) 2235 struct ocrdma_qp *qp)
2088{ 2236{
2089 struct ocrdma_dev *dev = qp->dev; 2237 struct ocrdma_pd *pd = qp->pd;
2238 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
2090 struct pci_dev *pdev = dev->nic_info.pdev; 2239 struct pci_dev *pdev = dev->nic_info.pdev;
2091 dma_addr_t pa = 0; 2240 dma_addr_t pa = 0;
2092 int ird_page_size = dev->attr.ird_page_size; 2241 int ird_page_size = dev->attr.ird_page_size;
@@ -2157,8 +2306,8 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
2157{ 2306{
2158 int status = -ENOMEM; 2307 int status = -ENOMEM;
2159 u32 flags = 0; 2308 u32 flags = 0;
2160 struct ocrdma_dev *dev = qp->dev;
2161 struct ocrdma_pd *pd = qp->pd; 2309 struct ocrdma_pd *pd = qp->pd;
2310 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
2162 struct pci_dev *pdev = dev->nic_info.pdev; 2311 struct pci_dev *pdev = dev->nic_info.pdev;
2163 struct ocrdma_cq *cq; 2312 struct ocrdma_cq *cq;
2164 struct ocrdma_create_qp_req *cmd; 2313 struct ocrdma_create_qp_req *cmd;
@@ -2281,11 +2430,12 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
2281 union ib_gid sgid, zgid; 2430 union ib_gid sgid, zgid;
2282 u32 vlan_id; 2431 u32 vlan_id;
2283 u8 mac_addr[6]; 2432 u8 mac_addr[6];
2433 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2284 2434
2285 if ((ah_attr->ah_flags & IB_AH_GRH) == 0) 2435 if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
2286 return -EINVAL; 2436 return -EINVAL;
2287 if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0)) 2437 if (atomic_cmpxchg(&dev->update_sl, 1, 0))
2288 ocrdma_init_service_level(qp->dev); 2438 ocrdma_init_service_level(dev);
2289 cmd->params.tclass_sq_psn |= 2439 cmd->params.tclass_sq_psn |=
2290 (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT); 2440 (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
2291 cmd->params.rnt_rc_sl_fl |= 2441 cmd->params.rnt_rc_sl_fl |=
@@ -2296,7 +2446,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
2296 cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID; 2446 cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
2297 memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0], 2447 memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
2298 sizeof(cmd->params.dgid)); 2448 sizeof(cmd->params.dgid));
2299 status = ocrdma_query_gid(&qp->dev->ibdev, 1, 2449 status = ocrdma_query_gid(&dev->ibdev, 1,
2300 ah_attr->grh.sgid_index, &sgid); 2450 ah_attr->grh.sgid_index, &sgid);
2301 if (status) 2451 if (status)
2302 return status; 2452 return status;
@@ -2307,7 +2457,9 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
2307 2457
2308 qp->sgid_idx = ah_attr->grh.sgid_index; 2458 qp->sgid_idx = ah_attr->grh.sgid_index;
2309 memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid)); 2459 memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
2310 ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]); 2460 status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
2461 if (status)
2462 return status;
2311 cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) | 2463 cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
2312 (mac_addr[2] << 16) | (mac_addr[3] << 24); 2464 (mac_addr[2] << 16) | (mac_addr[3] << 24);
2313 /* convert them to LE format. */ 2465 /* convert them to LE format. */
@@ -2320,7 +2472,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
2320 vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT; 2472 vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
2321 cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID; 2473 cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
2322 cmd->params.rnt_rc_sl_fl |= 2474 cmd->params.rnt_rc_sl_fl |=
2323 (qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT; 2475 (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
2324 } 2476 }
2325 return 0; 2477 return 0;
2326} 2478}
@@ -2330,6 +2482,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
2330 struct ib_qp_attr *attrs, int attr_mask) 2482 struct ib_qp_attr *attrs, int attr_mask)
2331{ 2483{
2332 int status = 0; 2484 int status = 0;
2485 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2333 2486
2334 if (attr_mask & IB_QP_PKEY_INDEX) { 2487 if (attr_mask & IB_QP_PKEY_INDEX) {
2335 cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index & 2488 cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
@@ -2347,12 +2500,12 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
2347 return status; 2500 return status;
2348 } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) { 2501 } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
2349 /* set the default mac address for UD, GSI QPs */ 2502 /* set the default mac address for UD, GSI QPs */
2350 cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] | 2503 cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] |
2351 (qp->dev->nic_info.mac_addr[1] << 8) | 2504 (dev->nic_info.mac_addr[1] << 8) |
2352 (qp->dev->nic_info.mac_addr[2] << 16) | 2505 (dev->nic_info.mac_addr[2] << 16) |
2353 (qp->dev->nic_info.mac_addr[3] << 24); 2506 (dev->nic_info.mac_addr[3] << 24);
2354 cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] | 2507 cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] |
2355 (qp->dev->nic_info.mac_addr[5] << 8); 2508 (dev->nic_info.mac_addr[5] << 8);
2356 } 2509 }
2357 if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) && 2510 if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
2358 attrs->en_sqd_async_notify) { 2511 attrs->en_sqd_async_notify) {
@@ -2409,7 +2562,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
2409 cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID; 2562 cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
2410 } 2563 }
2411 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { 2564 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2412 if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) { 2565 if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) {
2413 status = -EINVAL; 2566 status = -EINVAL;
2414 goto pmtu_err; 2567 goto pmtu_err;
2415 } 2568 }
@@ -2417,7 +2570,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
2417 cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID; 2570 cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
2418 } 2571 }
2419 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { 2572 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2420 if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) { 2573 if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) {
2421 status = -EINVAL; 2574 status = -EINVAL;
2422 goto pmtu_err; 2575 goto pmtu_err;
2423 } 2576 }
@@ -2870,6 +3023,82 @@ done:
2870 return status; 3023 return status;
2871} 3024}
2872 3025
3026static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
3027 int num)
3028{
3029 int i, status = -ENOMEM;
3030 struct ocrdma_modify_eqd_req *cmd;
3031
3032 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd));
3033 if (!cmd)
3034 return status;
3035
3036 ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY,
3037 OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
3038
3039 cmd->cmd.num_eq = num;
3040 for (i = 0; i < num; i++) {
3041 cmd->cmd.set_eqd[i].eq_id = eq[i].q.id;
3042 cmd->cmd.set_eqd[i].phase = 0;
3043 cmd->cmd.set_eqd[i].delay_multiplier =
3044 (eq[i].aic_obj.prev_eqd * 65)/100;
3045 }
3046 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
3050 kfree(cmd);
3051 return status;
3052}
3053
3054static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
3055 int num)
3056{
3057 int num_eqs, i = 0;
3058 if (num > 8) {
3059 while (num) {
3060 num_eqs = min(num, 8);
3061 ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs);
3062 i += num_eqs;
3063 num -= num_eqs;
3064 }
3065 } else {
3066 ocrdma_mbx_modify_eqd(dev, eq, num);
3067 }
3068 return 0;
3069}
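
ocrdma_mbx_modify_eqd() can carry at most eight set_eqd[] entries, so ocrdma_modify_eqd() above splits larger EQ sets into chunks. The if (num > 8) special case is redundant: the while loop alone already handles num <= 8 in a single pass. A sketch with the split folded away (same driver symbols as above; behaviour unchanged):

static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
			     int num)
{
	int num_eqs;

	while (num) {
		num_eqs = min(num, 8);	/* one mailbox command per chunk */
		ocrdma_mbx_modify_eqd(dev, eq, num_eqs);
		eq += num_eqs;
		num -= num_eqs;
	}
	return 0;
}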
3070
3071void ocrdma_eqd_set_task(struct work_struct *work)
3072{
3073 struct ocrdma_dev *dev =
3074 container_of(work, struct ocrdma_dev, eqd_work.work);
3075 struct ocrdma_eq *eq;
3076 int i, num = 0;
3077 u64 eq_intr;
3078
3079 for (i = 0; i < dev->eq_cnt; i++) {
3080 eq = &dev->eq_tbl[i];
3081 if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) {
3082 eq_intr = eq->aic_obj.eq_intr_cnt -
3083 eq->aic_obj.prev_eq_intr_cnt;
3084 if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) &&
3085 (eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) {
3086 eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD;
3087 num++;
3088 } else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) &&
3089 (eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) {
3090 eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD;
3091 num++;
3092 }
3093 }
3094 eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt;
3095 }
3096
3097 if (num)
3098 ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
3099 schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
3100}
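
The worker applies a two-point hysteresis per EQ: an interrupt rate above the high-water mark while at minimum delay jumps straight to maximum delay, and only a rate below the low-water mark while at maximum delay drops back. Isolating that decision (the macro names are the driver's own from above; a sketch, not a drop-in):

static int next_eqd(u64 rate, int cur_eqd)
{
	if (rate > EQ_INTR_PER_SEC_THRSH_HI && cur_eqd == EQ_AIC_MIN_EQD)
		return EQ_AIC_MAX_EQD;	/* storm: coalesce aggressively */
	if (rate < EQ_INTR_PER_SEC_THRSH_LOW && cur_eqd == EQ_AIC_MAX_EQD)
		return EQ_AIC_MIN_EQD;	/* quiet: favour latency again */
	return cur_eqd;			/* hold inside the hysteresis band */
}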
3101
2873int ocrdma_init_hw(struct ocrdma_dev *dev) 3102int ocrdma_init_hw(struct ocrdma_dev *dev)
2874{ 3103{
2875 int status; 3104 int status;
@@ -2915,6 +3144,7 @@ qpeq_err:
2915 3144
2916void ocrdma_cleanup_hw(struct ocrdma_dev *dev) 3145void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
2917{ 3146{
3147 ocrdma_free_pd_pool(dev);
2918 ocrdma_mbx_delete_ah_tbl(dev); 3148 ocrdma_mbx_delete_ah_tbl(dev);
2919 3149
2920 /* cleanup the eqs */ 3150 /* cleanup the eqs */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index 6eed8f191322..e905972fceb7 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -136,5 +136,7 @@ int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq);
136int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset); 136int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset);
137char *port_speed_string(struct ocrdma_dev *dev); 137char *port_speed_string(struct ocrdma_dev *dev);
138void ocrdma_init_service_level(struct ocrdma_dev *); 138void ocrdma_init_service_level(struct ocrdma_dev *);
139void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev);
140void ocrdma_free_pd_range(struct ocrdma_dev *dev);
139 141
140#endif /* __OCRDMA_HW_H__ */ 142#endif /* __OCRDMA_HW_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index b0b2257b8e04..7a2b59aca004 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -239,7 +239,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
239 239
240 dev->ibdev.node_type = RDMA_NODE_IB_CA; 240 dev->ibdev.node_type = RDMA_NODE_IB_CA;
241 dev->ibdev.phys_port_cnt = 1; 241 dev->ibdev.phys_port_cnt = 1;
242 dev->ibdev.num_comp_vectors = 1; 242 dev->ibdev.num_comp_vectors = dev->eq_cnt;
243 243
244 /* mandatory verbs. */ 244 /* mandatory verbs. */
245 dev->ibdev.query_device = ocrdma_query_device; 245 dev->ibdev.query_device = ocrdma_query_device;
@@ -329,6 +329,8 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
329 if (dev->stag_arr == NULL) 329 if (dev->stag_arr == NULL)
330 goto alloc_err; 330 goto alloc_err;
331 331
332 ocrdma_alloc_pd_pool(dev);
333
332 spin_lock_init(&dev->av_tbl.lock); 334 spin_lock_init(&dev->av_tbl.lock);
333 spin_lock_init(&dev->flush_q_lock); 335 spin_lock_init(&dev->flush_q_lock);
334 return 0; 336 return 0;
@@ -491,6 +493,9 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
491 spin_unlock(&ocrdma_devlist_lock); 493 spin_unlock(&ocrdma_devlist_lock);
492 /* Init stats */ 494 /* Init stats */
493 ocrdma_add_port_stats(dev); 495 ocrdma_add_port_stats(dev);
496 /* Interrupt Moderation */
497 INIT_DELAYED_WORK(&dev->eqd_work, ocrdma_eqd_set_task);
498 schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
494 499
495 pr_info("%s %s: %s \"%s\" port %d\n", 500 pr_info("%s %s: %s \"%s\" port %d\n",
496 dev_name(&dev->nic_info.pdev->dev), hca_name(dev), 501 dev_name(&dev->nic_info.pdev->dev), hca_name(dev),
@@ -528,11 +533,12 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
528 /* first unregister with stack to stop all the active traffic 533 /* first unregister with stack to stop all the active traffic
529 * of the registered clients. 534 * of the registered clients.
530 */ 535 */
531 ocrdma_rem_port_stats(dev); 536 cancel_delayed_work_sync(&dev->eqd_work);
532 ocrdma_remove_sysfiles(dev); 537 ocrdma_remove_sysfiles(dev);
533
534 ib_unregister_device(&dev->ibdev); 538 ib_unregister_device(&dev->ibdev);
535 539
540 ocrdma_rem_port_stats(dev);
541
536 spin_lock(&ocrdma_devlist_lock); 542 spin_lock(&ocrdma_devlist_lock);
537 list_del_rcu(&dev->entry); 543 list_del_rcu(&dev->entry);
538 spin_unlock(&ocrdma_devlist_lock); 544 spin_unlock(&ocrdma_devlist_lock);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 4e036480c1a8..243c87c8bd65 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -75,6 +75,8 @@ enum {
75 OCRDMA_CMD_DESTROY_RBQ = 26, 75 OCRDMA_CMD_DESTROY_RBQ = 26,
76 76
77 OCRDMA_CMD_GET_RDMA_STATS = 27, 77 OCRDMA_CMD_GET_RDMA_STATS = 27,
78 OCRDMA_CMD_ALLOC_PD_RANGE = 28,
79 OCRDMA_CMD_DEALLOC_PD_RANGE = 29,
78 80
79 OCRDMA_CMD_MAX 81 OCRDMA_CMD_MAX
80}; 82};
@@ -87,6 +89,7 @@ enum {
87 OCRDMA_CMD_CREATE_MQ = 21, 89 OCRDMA_CMD_CREATE_MQ = 21,
88 OCRDMA_CMD_GET_CTRL_ATTRIBUTES = 32, 90 OCRDMA_CMD_GET_CTRL_ATTRIBUTES = 32,
89 OCRDMA_CMD_GET_FW_VER = 35, 91 OCRDMA_CMD_GET_FW_VER = 35,
92 OCRDMA_CMD_MODIFY_EQ_DELAY = 41,
90 OCRDMA_CMD_DELETE_MQ = 53, 93 OCRDMA_CMD_DELETE_MQ = 53,
91 OCRDMA_CMD_DELETE_CQ = 54, 94 OCRDMA_CMD_DELETE_CQ = 54,
92 OCRDMA_CMD_DELETE_EQ = 55, 95 OCRDMA_CMD_DELETE_EQ = 55,
@@ -101,7 +104,7 @@ enum {
101 QTYPE_MCCQ = 3 104 QTYPE_MCCQ = 3
102}; 105};
103 106
104#define OCRDMA_MAX_SGID 8 107#define OCRDMA_MAX_SGID 16
105 108
106#define OCRDMA_MAX_QP 2048 109#define OCRDMA_MAX_QP 2048
107#define OCRDMA_MAX_CQ 2048 110#define OCRDMA_MAX_CQ 2048
@@ -314,6 +317,29 @@ struct ocrdma_create_eq_rsp {
314 317
315#define OCRDMA_EQ_MINOR_OTHER 0x1 318#define OCRDMA_EQ_MINOR_OTHER 0x1
316 319
320struct ocrdma_set_eqd {
321 u32 eq_id;
322 u32 phase;
323 u32 delay_multiplier;
324};
325
326struct ocrdma_modify_eqd_cmd {
327 struct ocrdma_mbx_hdr req;
328 u32 num_eq;
329 struct ocrdma_set_eqd set_eqd[8];
330} __packed;
331
332struct ocrdma_modify_eqd_req {
333 struct ocrdma_mqe_hdr hdr;
334 struct ocrdma_modify_eqd_cmd cmd;
335};
336
338struct ocrdma_modify_eq_delay_rsp {
339 struct ocrdma_mbx_rsp hdr;
340 u32 rsvd0;
341} __packed;
342
317enum { 343enum {
318 OCRDMA_MCQE_STATUS_SHIFT = 0, 344 OCRDMA_MCQE_STATUS_SHIFT = 0,
319 OCRDMA_MCQE_STATUS_MASK = 0xFFFF, 345 OCRDMA_MCQE_STATUS_MASK = 0xFFFF,
@@ -441,7 +467,9 @@ enum OCRDMA_ASYNC_EVENT_TYPE {
441 OCRDMA_DEVICE_FATAL_EVENT = 0x08, 467 OCRDMA_DEVICE_FATAL_EVENT = 0x08,
442 OCRDMA_SRQCAT_ERROR = 0x0E, 468 OCRDMA_SRQCAT_ERROR = 0x0E,
443 OCRDMA_SRQ_LIMIT_EVENT = 0x0F, 469 OCRDMA_SRQ_LIMIT_EVENT = 0x0F,
444 OCRDMA_QP_LAST_WQE_EVENT = 0x10 470 OCRDMA_QP_LAST_WQE_EVENT = 0x10,
471
472 OCRDMA_MAX_ASYNC_ERRORS
445}; 473};
446 474
447/* mailbox command request and responses */ 475/* mailbox command request and responses */
@@ -1297,6 +1325,37 @@ struct ocrdma_dealloc_pd_rsp {
1297 struct ocrdma_mbx_rsp rsp; 1325 struct ocrdma_mbx_rsp rsp;
1298}; 1326};
1299 1327
1328struct ocrdma_alloc_pd_range {
1329 struct ocrdma_mqe_hdr hdr;
1330 struct ocrdma_mbx_hdr req;
1331 u32 enable_dpp_rsvd;
1332 u32 pd_count;
1333};
1334
1335struct ocrdma_alloc_pd_range_rsp {
1336 struct ocrdma_mqe_hdr hdr;
1337 struct ocrdma_mbx_rsp rsp;
1338 u32 dpp_page_pdid;
1339 u32 pd_count;
1340};
1341
1342enum {
1343 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK = 0xFFFF,
1344};
1345
1346struct ocrdma_dealloc_pd_range {
1347 struct ocrdma_mqe_hdr hdr;
1348 struct ocrdma_mbx_hdr req;
1349 u32 start_pd_id;
1350 u32 pd_count;
1351};
1352
1353struct ocrdma_dealloc_pd_range_rsp {
1354 struct ocrdma_mqe_hdr hdr;
1355 struct ocrdma_mbx_hdr req;
1356 u32 rsvd;
1357};
1358
1300enum { 1359enum {
1301 OCRDMA_ADDR_CHECK_ENABLE = 1, 1360 OCRDMA_ADDR_CHECK_ENABLE = 1,
1302 OCRDMA_ADDR_CHECK_DISABLE = 0 1361 OCRDMA_ADDR_CHECK_DISABLE = 0
@@ -1597,7 +1656,9 @@ enum OCRDMA_CQE_STATUS {
1597 OCRDMA_CQE_INV_EEC_STATE_ERR, 1656 OCRDMA_CQE_INV_EEC_STATE_ERR,
1598 OCRDMA_CQE_FATAL_ERR, 1657 OCRDMA_CQE_FATAL_ERR,
1599 OCRDMA_CQE_RESP_TIMEOUT_ERR, 1658 OCRDMA_CQE_RESP_TIMEOUT_ERR,
1600 OCRDMA_CQE_GENERAL_ERR 1659 OCRDMA_CQE_GENERAL_ERR,
1660
1661 OCRDMA_MAX_CQE_ERR
1601}; 1662};
1602 1663
1603enum { 1664enum {
@@ -1673,6 +1734,7 @@ enum {
1673 OCRDMA_FLAG_FENCE_R = 0x8, 1734 OCRDMA_FLAG_FENCE_R = 0x8,
1674 OCRDMA_FLAG_SOLICIT = 0x10, 1735 OCRDMA_FLAG_SOLICIT = 0x10,
1675 OCRDMA_FLAG_IMM = 0x20, 1736 OCRDMA_FLAG_IMM = 0x20,
1737 OCRDMA_FLAG_AH_VLAN_PR = 0x40,
1676 1738
1677 /* Stag flags */ 1739 /* Stag flags */
1678 OCRDMA_LKEY_FLAG_LOCAL_WR = 0x1, 1740 OCRDMA_LKEY_FLAG_LOCAL_WR = 0x1,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 41a9aec9998d..48d7ef51aa0c 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -26,6 +26,7 @@
26 *******************************************************************/ 26 *******************************************************************/
27 27
28#include <rdma/ib_addr.h> 28#include <rdma/ib_addr.h>
29#include <rdma/ib_pma.h>
29#include "ocrdma_stats.h" 30#include "ocrdma_stats.h"
30 31
31static struct dentry *ocrdma_dbgfs_dir; 32static struct dentry *ocrdma_dbgfs_dir;
@@ -249,6 +250,27 @@ static char *ocrdma_rx_stats(struct ocrdma_dev *dev)
249 return stats; 250 return stats;
250} 251}
251 252
253static u64 ocrdma_sysfs_rcv_pkts(struct ocrdma_dev *dev)
254{
255 struct ocrdma_rdma_stats_resp *rdma_stats =
256 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
257 struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
258
259 return convert_to_64bit(rx_stats->roce_frames_lo,
260 rx_stats->roce_frames_hi) + (u64)rx_stats->roce_frame_icrc_drops +
261 (u64)rx_stats->roce_frame_payload_len_drops;
262}
263
264static u64 ocrdma_sysfs_rcv_data(struct ocrdma_dev *dev)
265{
266 struct ocrdma_rdma_stats_resp *rdma_stats =
267 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
268 struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
269
270 return (convert_to_64bit(rx_stats->roce_frame_bytes_lo,
271 rx_stats->roce_frame_bytes_hi))/4;
272}
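
Here and in ocrdma_sysfs_xmit_data() below, the reassembled byte totals are divided by 4 because the PMA PortRcvData/PortXmitData attributes count 32-bit words, not bytes. Assuming convert_to_64bit() simply splices the _lo/_hi halves, the conversion amounts to (bytes_to_pma_dwords() is illustrative, not a driver symbol):

static inline u64 bytes_to_pma_dwords(u32 lo, u32 hi)
{
	/* IBA PortXmitData/PortRcvData are counted in 32-bit words */
	return (((u64)hi << 32) | lo) / 4;
}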
273
252static char *ocrdma_tx_stats(struct ocrdma_dev *dev) 274static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
253{ 275{
254 char *stats = dev->stats_mem.debugfs_mem, *pcur; 276 char *stats = dev->stats_mem.debugfs_mem, *pcur;
@@ -292,6 +314,37 @@ static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
292 return stats; 314 return stats;
293} 315}
294 316
317static u64 ocrdma_sysfs_xmit_pkts(struct ocrdma_dev *dev)
318{
319 struct ocrdma_rdma_stats_resp *rdma_stats =
320 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
321 struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
322
323 return (convert_to_64bit(tx_stats->send_pkts_lo,
324 tx_stats->send_pkts_hi) +
325 convert_to_64bit(tx_stats->write_pkts_lo, tx_stats->write_pkts_hi) +
326 convert_to_64bit(tx_stats->read_pkts_lo, tx_stats->read_pkts_hi) +
327 convert_to_64bit(tx_stats->read_rsp_pkts_lo,
328 tx_stats->read_rsp_pkts_hi) +
329 convert_to_64bit(tx_stats->ack_pkts_lo, tx_stats->ack_pkts_hi));
330}
331
332static u64 ocrdma_sysfs_xmit_data(struct ocrdma_dev *dev)
333{
334 struct ocrdma_rdma_stats_resp *rdma_stats =
335 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
336 struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
337
338 return (convert_to_64bit(tx_stats->send_bytes_lo,
339 tx_stats->send_bytes_hi) +
340 convert_to_64bit(tx_stats->write_bytes_lo,
341 tx_stats->write_bytes_hi) +
342 convert_to_64bit(tx_stats->read_req_bytes_lo,
343 tx_stats->read_req_bytes_hi) +
344 convert_to_64bit(tx_stats->read_rsp_bytes_lo,
345 tx_stats->read_rsp_bytes_hi))/4;
346}
347
295static char *ocrdma_wqe_stats(struct ocrdma_dev *dev) 348static char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
296{ 349{
297 char *stats = dev->stats_mem.debugfs_mem, *pcur; 350 char *stats = dev->stats_mem.debugfs_mem, *pcur;
@@ -432,10 +485,118 @@ static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev)
432 return dev->stats_mem.debugfs_mem; 485 return dev->stats_mem.debugfs_mem;
433} 486}
434 487
488static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev)
489{
490 char *stats = dev->stats_mem.debugfs_mem, *pcur;
491
493 memset(stats, 0, OCRDMA_MAX_DBGFS_MEM);
494
495 pcur = stats;
496 pcur += ocrdma_add_stat(stats, pcur, "async_cq_err",
497 (u64)(dev->async_err_stats
498 [OCRDMA_CQ_ERROR].counter));
499 pcur += ocrdma_add_stat(stats, pcur, "async_cq_overrun_err",
500 (u64)dev->async_err_stats
501 [OCRDMA_CQ_OVERRUN_ERROR].counter);
502 pcur += ocrdma_add_stat(stats, pcur, "async_cq_qpcat_err",
503 (u64)dev->async_err_stats
504 [OCRDMA_CQ_QPCAT_ERROR].counter);
505 pcur += ocrdma_add_stat(stats, pcur, "async_qp_access_err",
506 (u64)dev->async_err_stats
507 [OCRDMA_QP_ACCESS_ERROR].counter);
508 pcur += ocrdma_add_stat(stats, pcur, "async_qp_commm_est_evt",
509 (u64)dev->async_err_stats
510 [OCRDMA_QP_COMM_EST_EVENT].counter);
511 pcur += ocrdma_add_stat(stats, pcur, "async_sq_drained_evt",
512 (u64)dev->async_err_stats
513 [OCRDMA_SQ_DRAINED_EVENT].counter);
514 pcur += ocrdma_add_stat(stats, pcur, "async_dev_fatal_evt",
515 (u64)dev->async_err_stats
516 [OCRDMA_DEVICE_FATAL_EVENT].counter);
517 pcur += ocrdma_add_stat(stats, pcur, "async_srqcat_err",
518 (u64)dev->async_err_stats
519 [OCRDMA_SRQCAT_ERROR].counter);
520 pcur += ocrdma_add_stat(stats, pcur, "async_srq_limit_evt",
521 (u64)dev->async_err_stats
522 [OCRDMA_SRQ_LIMIT_EVENT].counter);
523 pcur += ocrdma_add_stat(stats, pcur, "async_qp_last_wqe_evt",
524 (u64)dev->async_err_stats
525 [OCRDMA_QP_LAST_WQE_EVENT].counter);
526
527 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_len_err",
528 (u64)dev->cqe_err_stats
529 [OCRDMA_CQE_LOC_LEN_ERR].counter);
530 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_qp_op_err",
531 (u64)dev->cqe_err_stats
532 [OCRDMA_CQE_LOC_QP_OP_ERR].counter);
533 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_eec_op_err",
534 (u64)dev->cqe_err_stats
535 [OCRDMA_CQE_LOC_EEC_OP_ERR].counter);
536 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_prot_err",
537 (u64)dev->cqe_err_stats
538 [OCRDMA_CQE_LOC_PROT_ERR].counter);
539 pcur += ocrdma_add_stat(stats, pcur, "cqe_wr_flush_err",
540 (u64)dev->cqe_err_stats
541 [OCRDMA_CQE_WR_FLUSH_ERR].counter);
542 pcur += ocrdma_add_stat(stats, pcur, "cqe_mw_bind_err",
543 (u64)dev->cqe_err_stats
544 [OCRDMA_CQE_MW_BIND_ERR].counter);
545 pcur += ocrdma_add_stat(stats, pcur, "cqe_bad_resp_err",
546 (u64)dev->cqe_err_stats
547 [OCRDMA_CQE_BAD_RESP_ERR].counter);
548 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_access_err",
549 (u64)dev->cqe_err_stats
550 [OCRDMA_CQE_LOC_ACCESS_ERR].counter);
551 pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_req_err",
552 (u64)dev->cqe_err_stats
553 [OCRDMA_CQE_REM_INV_REQ_ERR].counter);
554 pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_access_err",
555 (u64)dev->cqe_err_stats
556 [OCRDMA_CQE_REM_ACCESS_ERR].counter);
557 pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_op_err",
558 (u64)dev->cqe_err_stats
559 [OCRDMA_CQE_REM_OP_ERR].counter);
560 pcur += ocrdma_add_stat(stats, pcur, "cqe_retry_exc_err",
561 (u64)dev->cqe_err_stats
562 [OCRDMA_CQE_RETRY_EXC_ERR].counter);
563 pcur += ocrdma_add_stat(stats, pcur, "cqe_rnr_retry_exc_err",
564 (u64)dev->cqe_err_stats
565 [OCRDMA_CQE_RNR_RETRY_EXC_ERR].counter);
566 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_rdd_viol_err",
567 (u64)dev->cqe_err_stats
568 [OCRDMA_CQE_LOC_RDD_VIOL_ERR].counter);
569 pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_rd_req_err",
570 (u64)dev->cqe_err_stats
571 [OCRDMA_CQE_REM_INV_RD_REQ_ERR].counter);
572 pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_abort_err",
573 (u64)dev->cqe_err_stats
574 [OCRDMA_CQE_REM_ABORT_ERR].counter);
575 pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eecn_err",
576 (u64)dev->cqe_err_stats
577 [OCRDMA_CQE_INV_EECN_ERR].counter);
578 pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eec_state_err",
579 (u64)dev->cqe_err_stats
580 [OCRDMA_CQE_INV_EEC_STATE_ERR].counter);
581 pcur += ocrdma_add_stat(stats, pcur, "cqe_fatal_err",
582 (u64)dev->cqe_err_stats
583 [OCRDMA_CQE_FATAL_ERR].counter);
584 pcur += ocrdma_add_stat(stats, pcur, "cqe_resp_timeout_err",
585 (u64)dev->cqe_err_stats
586 [OCRDMA_CQE_RESP_TIMEOUT_ERR].counter);
587 pcur += ocrdma_add_stat(stats, pcur, "cqe_general_err",
588 (u64)dev->cqe_err_stats
589 [OCRDMA_CQE_GENERAL_ERR].counter);
590 return stats;
591}
592
435static void ocrdma_update_stats(struct ocrdma_dev *dev) 593static void ocrdma_update_stats(struct ocrdma_dev *dev)
436{ 594{
437 ulong now = jiffies, secs; 595 ulong now = jiffies, secs;
438 int status = 0; 596 int status = 0;
597 struct ocrdma_rdma_stats_resp *rdma_stats =
598 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
599 struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;
439 600
440 secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U; 601 secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U;
441 if (secs) { 602 if (secs) {
@@ -444,10 +605,74 @@ static void ocrdma_update_stats(struct ocrdma_dev *dev)
444 if (status) 605 if (status)
445 pr_err("%s: stats mbox failed with status = %d\n", 606 pr_err("%s: stats mbox failed with status = %d\n",
446 __func__, status); 607 __func__, status);
608 /* Update PD counters from PD resource manager */
609 if (dev->pd_mgr->pd_prealloc_valid) {
610 rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_count;
611 rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_count;
612 /* Threshold stats */
613 rsrc_stats = &rdma_stats->th_rsrc_stats;
614 rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_thrsh;
615 rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_thrsh;
616 }
447 dev->last_stats_time = jiffies; 617 dev->last_stats_time = jiffies;
448 } 618 }
449} 619}
450 620
621static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
622 const char __user *buffer,
623 size_t count, loff_t *ppos)
624{
625 char tmp_str[32];
626 long reset;
627 int status = 0;
628 struct ocrdma_stats *pstats = filp->private_data;
629 struct ocrdma_dev *dev = pstats->dev;
630
631 if (!count || count > sizeof(tmp_str))
632 goto err;
633
634 if (copy_from_user(tmp_str, buffer, count))
635 goto err;
636
637 tmp_str[count-1] = '\0';
638 if (kstrtol(tmp_str, 10, &reset))
639 goto err;
640
641 switch (pstats->type) {
642 case OCRDMA_RESET_STATS:
643 if (reset) {
644 status = ocrdma_mbx_rdma_stats(dev, true);
645 if (status) {
646 pr_err("Failed to reset stats = %d\n", status);
647 goto err;
648 }
649 }
650 break;
651 default:
652 goto err;
653 }
654
655 return count;
656err:
657 return -EFAULT;
658}
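
Writing a non-zero integer to the new reset_stats debugfs node (registered below in ocrdma_add_port_stats()) issues ocrdma_mbx_rdma_stats(dev, true), asking the firmware to clear its counter block; writing 0 is accepted and ignored. Note that the node is created with S_IRUSR only, so while root can still write through it, a mode including S_IWUSR would better advertise the write path.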
659
660int ocrdma_pma_counters(struct ocrdma_dev *dev,
661 struct ib_mad *out_mad)
662{
663 struct ib_pma_portcounters *pma_cnt;
664
665 memset(out_mad->data, 0, sizeof(out_mad->data));
666 pma_cnt = (void *)(out_mad->data + 40);
667 ocrdma_update_stats(dev);
668
669 pma_cnt->port_xmit_data = cpu_to_be32(ocrdma_sysfs_xmit_data(dev));
670 pma_cnt->port_rcv_data = cpu_to_be32(ocrdma_sysfs_rcv_data(dev));
671 pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev));
672 pma_cnt->port_rcv_packets = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev));
673 return 0;
674}
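
ocrdma_pma_counters() fills the classic PMA PortCounters attribute: the counter block begins 40 bytes into the MAD data area, the same offset other IB/RoCE drivers use with struct ib_pma_portcounters from <rdma/ib_pma.h>. Because the attribute's data and packet fields are __be32, the cpu_to_be32() calls deliberately truncate the 64-bit totals computed above; the 64-bit PortCountersExtended attribute is not implemented here.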
675
451static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer, 676static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
452 size_t usr_buf_len, loff_t *ppos) 677 size_t usr_buf_len, loff_t *ppos)
453{ 678{
@@ -492,6 +717,9 @@ static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
492 case OCRDMA_RX_DBG_STATS: 717 case OCRDMA_RX_DBG_STATS:
493 data = ocrdma_rx_dbg_stats(dev); 718 data = ocrdma_rx_dbg_stats(dev);
494 break; 719 break;
720 case OCRDMA_DRV_STATS:
721 data = ocrdma_driver_dbg_stats(dev);
722 break;
495 723
496 default: 724 default:
497 status = -EFAULT; 725 status = -EFAULT;
@@ -514,6 +742,7 @@ static const struct file_operations ocrdma_dbg_ops = {
514 .owner = THIS_MODULE, 742 .owner = THIS_MODULE,
515 .open = simple_open, 743 .open = simple_open,
516 .read = ocrdma_dbgfs_ops_read, 744 .read = ocrdma_dbgfs_ops_read,
745 .write = ocrdma_dbgfs_ops_write,
517}; 746};
518 747
519void ocrdma_add_port_stats(struct ocrdma_dev *dev) 748void ocrdma_add_port_stats(struct ocrdma_dev *dev)
@@ -582,6 +811,18 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
582 &dev->rx_dbg_stats, &ocrdma_dbg_ops)) 811 &dev->rx_dbg_stats, &ocrdma_dbg_ops))
583 goto err; 812 goto err;
584 813
814 dev->driver_stats.type = OCRDMA_DRV_STATS;
815 dev->driver_stats.dev = dev;
816 if (!debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir,
817 &dev->driver_stats, &ocrdma_dbg_ops))
818 goto err;
819
820 dev->reset_stats.type = OCRDMA_RESET_STATS;
821 dev->reset_stats.dev = dev;
822 if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir,
823 &dev->reset_stats, &ocrdma_dbg_ops))
824 goto err;
825
585 /* Now create dma_mem for stats mbx command */ 826 /* Now create dma_mem for stats mbx command */
586 if (!ocrdma_alloc_stats_mem(dev)) 827 if (!ocrdma_alloc_stats_mem(dev))
587 goto err; 828 goto err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index 5f5e20c46d7c..091edd68a8a3 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -43,12 +43,16 @@ enum OCRDMA_STATS_TYPE {
43 OCRDMA_RXQP_ERRSTATS, 43 OCRDMA_RXQP_ERRSTATS,
44 OCRDMA_TXQP_ERRSTATS, 44 OCRDMA_TXQP_ERRSTATS,
45 OCRDMA_TX_DBG_STATS, 45 OCRDMA_TX_DBG_STATS,
46 OCRDMA_RX_DBG_STATS 46 OCRDMA_RX_DBG_STATS,
47 OCRDMA_DRV_STATS,
48 OCRDMA_RESET_STATS
47}; 49};
48 50
49void ocrdma_rem_debugfs(void); 51void ocrdma_rem_debugfs(void);
50void ocrdma_init_debugfs(void); 52void ocrdma_init_debugfs(void);
51void ocrdma_rem_port_stats(struct ocrdma_dev *dev); 53void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
52void ocrdma_add_port_stats(struct ocrdma_dev *dev); 54void ocrdma_add_port_stats(struct ocrdma_dev *dev);
55int ocrdma_pma_counters(struct ocrdma_dev *dev,
56 struct ib_mad *out_mad);
53 57
54#endif /* __OCRDMA_STATS_H__ */ 58#endif /* __OCRDMA_STATS_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index fb8d8c4dfbb9..877175563634 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
53 53
54 dev = get_ocrdma_dev(ibdev); 54 dev = get_ocrdma_dev(ibdev);
55 memset(sgid, 0, sizeof(*sgid)); 55 memset(sgid, 0, sizeof(*sgid));
56 if (index > OCRDMA_MAX_SGID) 56 if (index >= OCRDMA_MAX_SGID)
57 return -EINVAL; 57 return -EINVAL;
58 58
59 memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid)); 59 memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
@@ -253,6 +253,107 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
253 return found; 253 return found;
254} 254}
255 255
256
257static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
258{
259 u16 pd_bitmap_idx = 0;
260 const unsigned long *pd_bitmap;
261
262 if (dpp_pool) {
263 pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
264 pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
265 dev->pd_mgr->max_dpp_pd);
266 __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
267 dev->pd_mgr->pd_dpp_count++;
268 if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
269 dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
270 } else {
271 pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
272 pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
273 dev->pd_mgr->max_normal_pd);
274 __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
275 dev->pd_mgr->pd_norm_count++;
276 if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
277 dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
278 }
279 return pd_bitmap_idx;
280}
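
Both pools use the standard bitmap-allocator idiom: find the first clear bit, mark it, and derive the PD id from the bit index plus the range start. The non-atomic __set_bit()/__clear_bit() variants are safe only because every caller holds dev->dev_lock, and exhaustion goes unchecked here because callers compare the pool count against its maximum first. The idiom reduced to its core (a sketch; bitmap_pool_get() is not a driver symbol):

/* returns a free index in [0, max) or -EBUSY when the pool is full */
static int bitmap_pool_get(unsigned long *map, unsigned int max)
{
	unsigned int idx = find_first_zero_bit(map, max);

	if (idx >= max)
		return -EBUSY;
	__set_bit(idx, map);	/* non-atomic: caller holds the pool lock */
	return idx;
}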
281
282static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
283 bool dpp_pool)
284{
285 u16 pd_count;
286 u16 pd_bit_index;
287
288 pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
289 dev->pd_mgr->pd_norm_count;
290 if (pd_count == 0)
291 return -EINVAL;
292
293 if (dpp_pool) {
294 pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
295 if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
296 return -EINVAL;
297 } else {
298 __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
299 dev->pd_mgr->pd_dpp_count--;
300 }
301 } else {
302 pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
303 if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
304 return -EINVAL;
305 } else {
306 __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
307 dev->pd_mgr->pd_norm_count--;
308 }
309 }
310
311 return 0;
312}
313
314static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
315 bool dpp_pool)
316{
317 int status;
318
319 mutex_lock(&dev->dev_lock);
320 status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
321 mutex_unlock(&dev->dev_lock);
322 return status;
323}
324
325static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
326{
327 u16 pd_idx = 0;
328 int status = 0;
329
330 mutex_lock(&dev->dev_lock);
331 if (pd->dpp_enabled) {
332 /* try allocating DPP PD, if not available then normal PD */
333 if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
334 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
335 pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
336 pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
337 } else if (dev->pd_mgr->pd_norm_count <
338 dev->pd_mgr->max_normal_pd) {
339 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
340 pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
341 pd->dpp_enabled = false;
342 } else {
343 status = -EINVAL;
344 }
345 } else {
346 if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
347 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
348 pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
349 } else {
350 status = -EINVAL;
351 }
352 }
353 mutex_unlock(&dev->dev_lock);
354 return status;
355}
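
ocrdma_get_pd_num() prefers the DPP range for a DPP-capable request and quietly degrades to the normal range when that pool is exhausted, clearing pd->dpp_enabled so the rest of the verbs path treats the PD as ordinary; only with both pools full does it fail with -EINVAL. dev->dev_lock serialises both bitmap pools against concurrent allocators and the release path above.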
356
256static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev, 357static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
257 struct ocrdma_ucontext *uctx, 358 struct ocrdma_ucontext *uctx,
258 struct ib_udata *udata) 359 struct ib_udata *udata)
@@ -272,6 +373,11 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
272 dev->attr.wqe_size) : 0; 373 dev->attr.wqe_size) : 0;
273 } 374 }
274 375
376 if (dev->pd_mgr->pd_prealloc_valid) {
377 status = ocrdma_get_pd_num(dev, pd);
378 return (status == 0) ? pd : ERR_PTR(status);
379 }
380
275retry: 381retry:
276 status = ocrdma_mbx_alloc_pd(dev, pd); 382 status = ocrdma_mbx_alloc_pd(dev, pd);
277 if (status) { 383 if (status) {
@@ -299,7 +405,11 @@ static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
299{ 405{
300 int status = 0; 406 int status = 0;
301 407
302 status = ocrdma_mbx_dealloc_pd(dev, pd); 408 if (dev->pd_mgr->pd_prealloc_valid)
409 status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
410 else
411 status = ocrdma_mbx_dealloc_pd(dev, pd);
412
303 kfree(pd); 413 kfree(pd);
304 return status; 414 return status;
305} 415}
@@ -325,7 +435,6 @@ err:
325 435
326static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx) 436static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
327{ 437{
328 int status = 0;
329 struct ocrdma_pd *pd = uctx->cntxt_pd; 438 struct ocrdma_pd *pd = uctx->cntxt_pd;
330 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); 439 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
331 440
@@ -334,8 +443,8 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
334 __func__, dev->id, pd->id); 443 __func__, dev->id, pd->id);
335 } 444 }
336 uctx->cntxt_pd = NULL; 445 uctx->cntxt_pd = NULL;
337 status = _ocrdma_dealloc_pd(dev, pd); 446 (void)_ocrdma_dealloc_pd(dev, pd);
338 return status; 447 return 0;
339} 448}
340 449
341static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx) 450static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
@@ -569,7 +678,7 @@ err:
569 if (is_uctx_pd) { 678 if (is_uctx_pd) {
570 ocrdma_release_ucontext_pd(uctx); 679 ocrdma_release_ucontext_pd(uctx);
571 } else { 680 } else {
572 status = ocrdma_mbx_dealloc_pd(dev, pd); 681 status = _ocrdma_dealloc_pd(dev, pd);
573 kfree(pd); 682 kfree(pd);
574 } 683 }
575exit: 684exit:
@@ -837,9 +946,8 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
837{ 946{
838 struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr); 947 struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
839 struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device); 948 struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
840 int status;
841 949
842 status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey); 950 (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
843 951
844 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); 952 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
845 953
@@ -850,11 +958,10 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
850 958
851 /* Don't stop cleanup, in case FW is unresponsive */ 959 /* Don't stop cleanup, in case FW is unresponsive */
852 if (dev->mqe_ctx.fw_error_state) { 960 if (dev->mqe_ctx.fw_error_state) {
853 status = 0;
854 pr_err("%s(%d) fw not responding.\n", 961 pr_err("%s(%d) fw not responding.\n",
855 __func__, dev->id); 962 __func__, dev->id);
856 } 963 }
857 return status; 964 return 0;
858} 965}
859 966
860static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq, 967static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
@@ -986,7 +1093,6 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq)
986 1093
987int ocrdma_destroy_cq(struct ib_cq *ibcq) 1094int ocrdma_destroy_cq(struct ib_cq *ibcq)
988{ 1095{
989 int status;
990 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); 1096 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
991 struct ocrdma_eq *eq = NULL; 1097 struct ocrdma_eq *eq = NULL;
992 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); 1098 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
@@ -1003,7 +1109,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
1003 synchronize_irq(irq); 1109 synchronize_irq(irq);
1004 ocrdma_flush_cq(cq); 1110 ocrdma_flush_cq(cq);
1005 1111
1006 status = ocrdma_mbx_destroy_cq(dev, cq); 1112 (void)ocrdma_mbx_destroy_cq(dev, cq);
1007 if (cq->ucontext) { 1113 if (cq->ucontext) {
1008 pdid = cq->ucontext->cntxt_pd->id; 1114 pdid = cq->ucontext->cntxt_pd->id;
1009 ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, 1115 ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
@@ -1014,7 +1120,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
1014 } 1120 }
1015 1121
1016 kfree(cq); 1122 kfree(cq);
1017 return status; 1123 return 0;
1018} 1124}
1019 1125
1020static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) 1126static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
@@ -1113,8 +1219,8 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
1113 int status = 0; 1219 int status = 0;
1114 u64 usr_db; 1220 u64 usr_db;
1115 struct ocrdma_create_qp_uresp uresp; 1221 struct ocrdma_create_qp_uresp uresp;
1116 struct ocrdma_dev *dev = qp->dev;
1117 struct ocrdma_pd *pd = qp->pd; 1222 struct ocrdma_pd *pd = qp->pd;
1223 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
1118 1224
1119 memset(&uresp, 0, sizeof(uresp)); 1225 memset(&uresp, 0, sizeof(uresp));
1120 usr_db = dev->nic_info.unmapped_db + 1226 usr_db = dev->nic_info.unmapped_db +
@@ -1253,7 +1359,6 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
1253 status = -ENOMEM; 1359 status = -ENOMEM;
1254 goto gen_err; 1360 goto gen_err;
1255 } 1361 }
1256 qp->dev = dev;
1257 ocrdma_set_qp_init_params(qp, pd, attrs); 1362 ocrdma_set_qp_init_params(qp, pd, attrs);
1258 if (udata == NULL) 1363 if (udata == NULL)
1259 qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 | 1364 qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
@@ -1312,7 +1417,7 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1312 enum ib_qp_state old_qps; 1417 enum ib_qp_state old_qps;
1313 1418
1314 qp = get_ocrdma_qp(ibqp); 1419 qp = get_ocrdma_qp(ibqp);
1315 dev = qp->dev; 1420 dev = get_ocrdma_dev(ibqp->device);
1316 if (attr_mask & IB_QP_STATE) 1421 if (attr_mask & IB_QP_STATE)
1317 status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps); 1422 status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
1318 /* if new and previous states are same hw doesn't need to 1423 /* if new and previous states are same hw doesn't need to
@@ -1335,7 +1440,7 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1335 enum ib_qp_state old_qps, new_qps; 1440 enum ib_qp_state old_qps, new_qps;
1336 1441
1337 qp = get_ocrdma_qp(ibqp); 1442 qp = get_ocrdma_qp(ibqp);
1338 dev = qp->dev; 1443 dev = get_ocrdma_dev(ibqp->device);
1339 1444
1340 /* syncronize with multiple context trying to change, retrive qps */ 1445 /* syncronize with multiple context trying to change, retrive qps */
1341 mutex_lock(&dev->dev_lock); 1446 mutex_lock(&dev->dev_lock);
@@ -1402,7 +1507,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
1402 u32 qp_state; 1507 u32 qp_state;
1403 struct ocrdma_qp_params params; 1508 struct ocrdma_qp_params params;
1404 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); 1509 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1405 struct ocrdma_dev *dev = qp->dev; 1510 struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
1406 1511
1407 memset(&params, 0, sizeof(params)); 1512 memset(&params, 0, sizeof(params));
1408 mutex_lock(&dev->dev_lock); 1513 mutex_lock(&dev->dev_lock);
@@ -1412,8 +1517,6 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
1412 goto mbx_err; 1517 goto mbx_err;
1413 if (qp->qp_type == IB_QPT_UD) 1518 if (qp->qp_type == IB_QPT_UD)
1414 qp_attr->qkey = params.qkey; 1519 qp_attr->qkey = params.qkey;
1415 qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
1416 qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
1417 qp_attr->path_mtu = 1520 qp_attr->path_mtu =
1418 ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx & 1521 ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
1419 OCRDMA_QP_PARAMS_PATH_MTU_MASK) >> 1522 OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
@@ -1468,6 +1571,8 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
1468 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr)); 1571 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1469 qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >> 1572 qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1470 OCRDMA_QP_PARAMS_STATE_SHIFT; 1573 OCRDMA_QP_PARAMS_STATE_SHIFT;
1574 qp_attr->qp_state = get_ibqp_state(qp_state);
1575 qp_attr->cur_qp_state = qp_attr->qp_state;
1471 qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0; 1576 qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1472 qp_attr->max_dest_rd_atomic = 1577 qp_attr->max_dest_rd_atomic =
1473 params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT; 1578 params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
@@ -1475,19 +1580,18 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
1475 params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK; 1580 params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1476 qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags & 1581 qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1477 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0; 1582 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
1583 /* Sync driver QP state with FW */
1584 ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
1478mbx_err: 1585mbx_err:
1479 return status; 1586 return status;
1480} 1587}
1481 1588
1482static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx) 1589static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
1483{ 1590{
1484 int i = idx / 32; 1591 unsigned int i = idx / 32;
1485 unsigned int mask = (1 << (idx % 32)); 1592 u32 mask = (1U << (idx % 32));
1486 1593
1487 if (srq->idx_bit_fields[i] & mask) 1594 srq->idx_bit_fields[i] ^= mask;
1488 srq->idx_bit_fields[i] &= ~mask;
1489 else
1490 srq->idx_bit_fields[i] |= mask;
1491} 1595}
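
The rewritten helper relies on x ^= mask flipping exactly the bits set in mask, collapsing the old test/set/clear sequence into one branch-free statement:

/* equivalent to: if bit is set, clear it; else set it */
static inline void toggle_bit(u32 *word, unsigned int bit)
{
	*word ^= 1U << bit;
}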
1492 1596
1493static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q) 1597static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
@@ -1596,7 +1700,7 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1596{ 1700{
1597 int found = false; 1701 int found = false;
1598 unsigned long flags; 1702 unsigned long flags;
1599 struct ocrdma_dev *dev = qp->dev; 1703 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
1600 /* sync with any active CQ poll */ 1704 /* sync with any active CQ poll */
1601 1705
1602 spin_lock_irqsave(&dev->flush_q_lock, flags); 1706 spin_lock_irqsave(&dev->flush_q_lock, flags);
@@ -1613,7 +1717,6 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1613 1717
1614int ocrdma_destroy_qp(struct ib_qp *ibqp) 1718int ocrdma_destroy_qp(struct ib_qp *ibqp)
1615{ 1719{
1616 int status;
1617 struct ocrdma_pd *pd; 1720 struct ocrdma_pd *pd;
1618 struct ocrdma_qp *qp; 1721 struct ocrdma_qp *qp;
1619 struct ocrdma_dev *dev; 1722 struct ocrdma_dev *dev;
@@ -1622,7 +1725,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
1622 unsigned long flags; 1725 unsigned long flags;
1623 1726
1624 qp = get_ocrdma_qp(ibqp); 1727 qp = get_ocrdma_qp(ibqp);
1625 dev = qp->dev; 1728 dev = get_ocrdma_dev(ibqp->device);
1626 1729
1627 attrs.qp_state = IB_QPS_ERR; 1730 attrs.qp_state = IB_QPS_ERR;
1628 pd = qp->pd; 1731 pd = qp->pd;
@@ -1635,7 +1738,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
1635 * discarded until the old CQEs are discarded. 1738 * discarded until the old CQEs are discarded.
1636 */ 1739 */
1637 mutex_lock(&dev->dev_lock); 1740 mutex_lock(&dev->dev_lock);
1638 status = ocrdma_mbx_destroy_qp(dev, qp); 1741 (void) ocrdma_mbx_destroy_qp(dev, qp);
1639 1742
1640 /* 1743 /*
1641 * acquire CQ lock while destroy is in progress, in order to 1744 * acquire CQ lock while destroy is in progress, in order to
@@ -1670,7 +1773,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
1670 kfree(qp->wqe_wr_id_tbl); 1773 kfree(qp->wqe_wr_id_tbl);
1671 kfree(qp->rqe_wr_id_tbl); 1774 kfree(qp->rqe_wr_id_tbl);
1672 kfree(qp); 1775 kfree(qp);
1673 return status; 1776 return 0;
1674} 1777}
1675 1778
1676static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq, 1779static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
@@ -1831,6 +1934,8 @@ static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1831 else 1934 else
1832 ud_hdr->qkey = wr->wr.ud.remote_qkey; 1935 ud_hdr->qkey = wr->wr.ud.remote_qkey;
1833 ud_hdr->rsvd_ahid = ah->id; 1936 ud_hdr->rsvd_ahid = ah->id;
1937 if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
1938 hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
1834} 1939}
1835 1940
1836static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr, 1941static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
@@ -2007,11 +2112,12 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2007 u64 fbo; 2112 u64 fbo;
2008 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1); 2113 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
2009 struct ocrdma_mr *mr; 2114 struct ocrdma_mr *mr;
2115 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2010 u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr); 2116 u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
2011 2117
2012 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES); 2118 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
2013 2119
2014 if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr) 2120 if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr)
2015 return -EINVAL; 2121 return -EINVAL;
2016 2122
2017 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT); 2123 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
@@ -2039,7 +2145,7 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2039 fast_reg->size_sge = 2145 fast_reg->size_sge =
2040 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift); 2146 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
2041 mr = (struct ocrdma_mr *) (unsigned long) 2147 mr = (struct ocrdma_mr *) (unsigned long)
2042 qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)]; 2148 dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
2043 build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr); 2149 build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
2044 return 0; 2150 return 0;
2045} 2151}
@@ -2112,8 +2218,6 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2112 hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT); 2218 hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2113 status = ocrdma_build_write(qp, hdr, wr); 2219 status = ocrdma_build_write(qp, hdr, wr);
2114 break; 2220 break;
2115 case IB_WR_RDMA_READ_WITH_INV:
2116 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2117 case IB_WR_RDMA_READ: 2221 case IB_WR_RDMA_READ:
2118 ocrdma_build_read(qp, hdr, wr); 2222 ocrdma_build_read(qp, hdr, wr);
2119 break; 2223 break;
@@ -2484,8 +2588,11 @@ static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2484 bool *polled, bool *stop) 2588 bool *polled, bool *stop)
2485{ 2589{
2486 bool expand; 2590 bool expand;
2591 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2487 int status = (le32_to_cpu(cqe->flags_status_srcqpn) & 2592 int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2488 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; 2593 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2594 if (status < OCRDMA_MAX_CQE_ERR)
2595 atomic_inc(&dev->cqe_err_stats[status]);
2489 2596
2490 /* when hw sq is empty, but rq is not empty, so we continue 2597 /* when hw sq is empty, but rq is not empty, so we continue
2491 * to keep the cqe in order to get the cq event again. 2598 * to keep the cqe in order to get the cq event again.
@@ -2604,6 +2711,10 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2604 int status) 2711 int status)
2605{ 2712{
2606 bool expand; 2713 bool expand;
2714 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2715
2716 if (status < OCRDMA_MAX_CQE_ERR)
2717 atomic_inc(&dev->cqe_err_stats[status]);
2607 2718
2608 /* when hw_rq is empty, but wq is not empty, so continue 2719 /* when hw_rq is empty, but wq is not empty, so continue
2609 * to keep the cqe to get the cq event again. 2720 * to keep the cqe to get the cq event again.
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index c00ae093b6f8..ffd48bfc4923 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1082,12 +1082,6 @@ struct qib_devdata {
1082 /* control high-level access to EEPROM */ 1082 /* control high-level access to EEPROM */
1083 struct mutex eep_lock; 1083 struct mutex eep_lock;
1084 uint64_t traffic_wds; 1084 uint64_t traffic_wds;
1085 /* active time is kept in seconds, but logged in hours */
1086 atomic_t active_time;
1087 /* Below are nominal shadow of EEPROM, new since last EEPROM update */
1088 uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
1089 uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
1090 uint16_t eep_hrs;
1091 /* 1085 /*
1092 * masks for which bits of errs, hwerrs that cause 1086 * masks for which bits of errs, hwerrs that cause
1093 * each of the counters to increment. 1087 * each of the counters to increment.
@@ -1309,8 +1303,7 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
1309int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr, 1303int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
1310 const void *buffer, int len); 1304 const void *buffer, int len);
1311void qib_get_eeprom_info(struct qib_devdata *); 1305void qib_get_eeprom_info(struct qib_devdata *);
1312int qib_update_eeprom_log(struct qib_devdata *dd); 1306#define qib_inc_eeprom_err(dd, eidx, incr)
1313void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
1314void qib_dump_lookup_output_queue(struct qib_devdata *); 1307void qib_dump_lookup_output_queue(struct qib_devdata *);
1315void qib_force_pio_avail_update(struct qib_devdata *); 1308void qib_force_pio_avail_update(struct qib_devdata *);
1316void qib_clear_symerror_on_linkup(unsigned long opaque); 1309void qib_clear_symerror_on_linkup(unsigned long opaque);
@@ -1467,11 +1460,14 @@ const char *qib_get_unit_name(int unit);
1467 * Flush write combining store buffers (if present) and perform a write 1460 * Flush write combining store buffers (if present) and perform a write
1468 * barrier. 1461 * barrier.
1469 */ 1462 */
1463static inline void qib_flush_wc(void)
1464{
1470#if defined(CONFIG_X86_64) 1465#if defined(CONFIG_X86_64)
1471#define qib_flush_wc() asm volatile("sfence" : : : "memory") 1466 asm volatile("sfence" : : : "memory");
1472#else 1467#else
1473#define qib_flush_wc() wmb() /* no reorder around wc flush */ 1468 wmb(); /* no reorder around wc flush */
1474#endif 1469#endif
1470}
1475 1471
1476/* global module parameter variables */ 1472/* global module parameter variables */
1477extern unsigned qib_ibmtu; 1473extern unsigned qib_ibmtu;
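
Converting qib_flush_wc() from two #define bodies to a single static inline keeps the CONFIG_X86_64 choice in one definition and gives call sites ordinary type checking; the generated code is unchanged (an sfence on x86-64, a wmb() elsewhere).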
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
index 5670ace27c63..4fb78abd8ba1 100644
--- a/drivers/infiniband/hw/qib/qib_common.h
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -257,7 +257,7 @@ struct qib_base_info {
257 257
258 /* shared memory page for send buffer disarm status */ 258 /* shared memory page for send buffer disarm status */
259 __u64 spi_sendbuf_status; 259 __u64 spi_sendbuf_status;
260} __attribute__ ((aligned(8))); 260} __aligned(8);
261 261
262/* 262/*
263 * This version number is given to the driver by the user code during 263 * This version number is given to the driver by the user code during
@@ -361,7 +361,7 @@ struct qib_user_info {
361 */ 361 */
362 __u64 spu_base_info; 362 __u64 spu_base_info;
363 363
364} __attribute__ ((aligned(8))); 364} __aligned(8);
365 365
366/* User commands. */ 366/* User commands. */
367 367
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
index 6abd3ed3cd51..5e75b43c596b 100644
--- a/drivers/infiniband/hw/qib/qib_debugfs.c
+++ b/drivers/infiniband/hw/qib/qib_debugfs.c
@@ -255,7 +255,6 @@ void qib_dbg_ibdev_init(struct qib_ibdev *ibd)
255 DEBUGFS_FILE_CREATE(opcode_stats); 255 DEBUGFS_FILE_CREATE(opcode_stats);
256 DEBUGFS_FILE_CREATE(ctx_stats); 256 DEBUGFS_FILE_CREATE(ctx_stats);
257 DEBUGFS_FILE_CREATE(qp_stats); 257 DEBUGFS_FILE_CREATE(qp_stats);
258 return;
259} 258}
260 259
261void qib_dbg_ibdev_exit(struct qib_ibdev *ibd) 260void qib_dbg_ibdev_exit(struct qib_ibdev *ibd)
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
index 5dfda4c5cc9c..8c34b23e5bf6 100644
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -85,7 +85,7 @@ static struct qib_diag_client *get_client(struct qib_devdata *dd)
85 client_pool = dc->next; 85 client_pool = dc->next;
86 else 86 else
87 /* None in pool, alloc and init */ 87 /* None in pool, alloc and init */
88 dc = kmalloc(sizeof *dc, GFP_KERNEL); 88 dc = kmalloc(sizeof(*dc), GFP_KERNEL);
89 89
90 if (dc) { 90 if (dc) {
91 dc->next = NULL; 91 dc->next = NULL;
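The sizeof changes in this file are checkpatch style: sizeof takes parentheses, and sizing the pointed-to object rather than a named type keeps the allocation correct if the pointer is ever retyped. A sketch with a hypothetical struct:

struct client {
	struct client *next;
};

static struct client *get_client(void)
{
	struct client *dc = kmalloc(sizeof(*dc), GFP_KERNEL);

	/* kmalloc(sizeof(struct client), ...) would go quietly stale
	 * if dc's type ever changed; sizeof(*dc) tracks it for free. */
	return dc;
}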
@@ -257,6 +257,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
257 if (dd->userbase) { 257 if (dd->userbase) {
258 /* If user regs mapped, they are after send, so set limit. */ 258 /* If user regs mapped, they are after send, so set limit. */
259 u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase; 259 u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
260
260 if (!dd->piovl15base) 261 if (!dd->piovl15base)
261 snd_lim = dd->uregbase; 262 snd_lim = dd->uregbase;
262 krb32 = (u32 __iomem *)dd->userbase; 263 krb32 = (u32 __iomem *)dd->userbase;
@@ -280,6 +281,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
280 snd_bottom = dd->pio2k_bufbase; 281 snd_bottom = dd->pio2k_bufbase;
281 if (snd_lim == 0) { 282 if (snd_lim == 0) {
282 u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign); 283 u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
284
283 snd_lim = snd_bottom + tot2k; 285 snd_lim = snd_bottom + tot2k;
284 } 286 }
285 /* If 4k buffers exist, account for them by bumping 287 /* If 4k buffers exist, account for them by bumping
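The recurring one-line additions in these hunks all fix the same checkpatch warning: a blank line must separate local declarations from the first statement. In miniature:

static u32 compute_limit(u32 cfg, u32 align)
{
	u32 ulim = cfg * align;	/* declarations first */

	return ulim;		/* blank line, then statements */
}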
@@ -398,6 +400,7 @@ static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
398 /* not very efficient, but it works for now */ 400 /* not very efficient, but it works for now */
399 while (reg_addr < reg_end) { 401 while (reg_addr < reg_end) {
400 u64 data; 402 u64 data;
403
401 if (copy_from_user(&data, uaddr, sizeof(data))) { 404 if (copy_from_user(&data, uaddr, sizeof(data))) {
402 ret = -EFAULT; 405 ret = -EFAULT;
403 goto bail; 406 goto bail;
@@ -698,7 +701,7 @@ int qib_register_observer(struct qib_devdata *dd,
698 701
699 if (!dd || !op) 702 if (!dd || !op)
700 return -EINVAL; 703 return -EINVAL;
701 olp = vmalloc(sizeof *olp); 704 olp = vmalloc(sizeof(*olp));
702 if (!olp) { 705 if (!olp) {
703 pr_err("vmalloc for observer failed\n"); 706 pr_err("vmalloc for observer failed\n");
704 return -ENOMEM; 707 return -ENOMEM;
@@ -796,6 +799,7 @@ static ssize_t qib_diag_read(struct file *fp, char __user *data,
796 op = diag_get_observer(dd, *off); 799 op = diag_get_observer(dd, *off);
797 if (op) { 800 if (op) {
798 u32 offset = *off; 801 u32 offset = *off;
802
799 ret = op->hook(dd, op, offset, &data64, 0, use_32); 803 ret = op->hook(dd, op, offset, &data64, 0, use_32);
800 } 804 }
801 /* 805 /*
@@ -873,6 +877,7 @@ static ssize_t qib_diag_write(struct file *fp, const char __user *data,
873 if (count == 4 || count == 8) { 877 if (count == 4 || count == 8) {
874 u64 data64; 878 u64 data64;
875 u32 offset = *off; 879 u32 offset = *off;
880
876 ret = copy_from_user(&data64, data, count); 881 ret = copy_from_user(&data64, data, count);
877 if (ret) { 882 if (ret) {
878 ret = -EFAULT; 883 ret = -EFAULT;
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 5bee08f16d74..f58fdc3d25a2 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -86,7 +86,7 @@ const char *qib_get_unit_name(int unit)
86{ 86{
87 static char iname[16]; 87 static char iname[16];
88 88
89 snprintf(iname, sizeof iname, "infinipath%u", unit); 89 snprintf(iname, sizeof(iname), "infinipath%u", unit);
90 return iname; 90 return iname;
91} 91}
92 92
@@ -349,6 +349,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
349 qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK; 349 qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
350 if (qp_num != QIB_MULTICAST_QPN) { 350 if (qp_num != QIB_MULTICAST_QPN) {
351 int ruc_res; 351 int ruc_res;
352
352 qp = qib_lookup_qpn(ibp, qp_num); 353 qp = qib_lookup_qpn(ibp, qp_num);
353 if (!qp) 354 if (!qp)
354 goto drop; 355 goto drop;
@@ -461,6 +462,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
461 rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset; 462 rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
462 if (dd->flags & QIB_NODMA_RTAIL) { 463 if (dd->flags & QIB_NODMA_RTAIL) {
463 u32 seq = qib_hdrget_seq(rhf_addr); 464 u32 seq = qib_hdrget_seq(rhf_addr);
465
464 if (seq != rcd->seq_cnt) 466 if (seq != rcd->seq_cnt)
465 goto bail; 467 goto bail;
466 hdrqtail = 0; 468 hdrqtail = 0;
@@ -651,6 +653,7 @@ bail:
651int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc) 653int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
652{ 654{
653 struct qib_devdata *dd = ppd->dd; 655 struct qib_devdata *dd = ppd->dd;
656
654 ppd->lid = lid; 657 ppd->lid = lid;
655 ppd->lmc = lmc; 658 ppd->lmc = lmc;
656 659
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
index 4d5d71aaa2b4..311ee6c3dd5e 100644
--- a/drivers/infiniband/hw/qib/qib_eeprom.c
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -153,6 +153,7 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
153 153
154 if (t && dd0->nguid > 1 && t <= dd0->nguid) { 154 if (t && dd0->nguid > 1 && t <= dd0->nguid) {
155 u8 oguid; 155 u8 oguid;
156
156 dd->base_guid = dd0->base_guid; 157 dd->base_guid = dd0->base_guid;
157 bguid = (u8 *) &dd->base_guid; 158 bguid = (u8 *) &dd->base_guid;
158 159
@@ -251,206 +252,25 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
251 * This board has a Serial-prefix, which is stored 252 * This board has a Serial-prefix, which is stored
252 * elsewhere for backward-compatibility. 253 * elsewhere for backward-compatibility.
253 */ 254 */
254 memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix); 255 memcpy(snp, ifp->if_sprefix, sizeof(ifp->if_sprefix));
255 snp[sizeof ifp->if_sprefix] = '\0'; 256 snp[sizeof(ifp->if_sprefix)] = '\0';
256 len = strlen(snp); 257 len = strlen(snp);
257 snp += len; 258 snp += len;
258 len = (sizeof dd->serial) - len; 259 len = sizeof(dd->serial) - len;
259 if (len > sizeof ifp->if_serial) 260 if (len > sizeof(ifp->if_serial))
260 len = sizeof ifp->if_serial; 261 len = sizeof(ifp->if_serial);
261 memcpy(snp, ifp->if_serial, len); 262 memcpy(snp, ifp->if_serial, len);
262 } else 263 } else {
263 memcpy(dd->serial, ifp->if_serial, 264 memcpy(dd->serial, ifp->if_serial, sizeof(ifp->if_serial));
264 sizeof ifp->if_serial); 265 }
265 if (!strstr(ifp->if_comment, "Tested successfully")) 266 if (!strstr(ifp->if_comment, "Tested successfully"))
266 qib_dev_err(dd, 267 qib_dev_err(dd,
267 "Board SN %s did not pass functional test: %s\n", 268 "Board SN %s did not pass functional test: %s\n",
268 dd->serial, ifp->if_comment); 269 dd->serial, ifp->if_comment);
269 270
270 memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
271 /*
272 * Power-on (actually "active") hours are kept as little-endian value
273 * in EEPROM, but as seconds in a (possibly as small as 24-bit)
274 * atomic_t while running.
275 */
276 atomic_set(&dd->active_time, 0);
277 dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
278
279done: 271done:
280 vfree(buf); 272 vfree(buf);
281 273
282bail:; 274bail:;
283} 275}
284 276
285/**
286 * qib_update_eeprom_log - copy active-time and error counters to eeprom
287 * @dd: the qlogic_ib device
288 *
289 * Although the time is kept as seconds in the qib_devdata struct, it is
290 * rounded to hours for re-write, as we have only 16 bits in EEPROM.
291 * First-cut code reads whole (expected) struct qib_flash, modifies,
292 * re-writes. Future direction: read/write only what we need, assuming
293 * that the EEPROM had to have been "good enough" for driver init, and
294 * if not, we aren't making it worse.
295 *
296 */
297int qib_update_eeprom_log(struct qib_devdata *dd)
298{
299 void *buf;
300 struct qib_flash *ifp;
301 int len, hi_water;
302 uint32_t new_time, new_hrs;
303 u8 csum;
304 int ret, idx;
305 unsigned long flags;
306
307 /* first, check if we actually need to do anything. */
308 ret = 0;
309 for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
310 if (dd->eep_st_new_errs[idx]) {
311 ret = 1;
312 break;
313 }
314 }
315 new_time = atomic_read(&dd->active_time);
316
317 if (ret == 0 && new_time < 3600)
318 goto bail;
319
320 /*
321 * The quick-check above determined that there is something worthy
 322 * of logging, so get current contents and take a more detailed look.
323 * read full flash, not just currently used part, since it may have
324 * been written with a newer definition
325 */
326 len = sizeof(struct qib_flash);
327 buf = vmalloc(len);
328 ret = 1;
329 if (!buf) {
330 qib_dev_err(dd,
331 "Couldn't allocate memory to read %u bytes from eeprom for logging\n",
332 len);
333 goto bail;
334 }
335
336 /* Grab semaphore and read current EEPROM. If we get an
337 * error, let go, but if not, keep it until we finish write.
338 */
339 ret = mutex_lock_interruptible(&dd->eep_lock);
340 if (ret) {
341 qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
342 goto free_bail;
343 }
344 ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
345 if (ret) {
346 mutex_unlock(&dd->eep_lock);
347 qib_dev_err(dd, "Unable read EEPROM for logging\n");
348 goto free_bail;
349 }
350 ifp = (struct qib_flash *)buf;
351
352 csum = flash_csum(ifp, 0);
353 if (csum != ifp->if_csum) {
354 mutex_unlock(&dd->eep_lock);
355 qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
356 csum, ifp->if_csum);
357 ret = 1;
358 goto free_bail;
359 }
360 hi_water = 0;
361 spin_lock_irqsave(&dd->eep_st_lock, flags);
362 for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
363 int new_val = dd->eep_st_new_errs[idx];
364 if (new_val) {
365 /*
366 * If we have seen any errors, add to EEPROM values
367 * We need to saturate at 0xFF (255) and we also
368 * would need to adjust the checksum if we were
369 * trying to minimize EEPROM traffic
370 * Note that we add to actual current count in EEPROM,
371 * in case it was altered while we were running.
372 */
373 new_val += ifp->if_errcntp[idx];
374 if (new_val > 0xFF)
375 new_val = 0xFF;
376 if (ifp->if_errcntp[idx] != new_val) {
377 ifp->if_errcntp[idx] = new_val;
378 hi_water = offsetof(struct qib_flash,
379 if_errcntp) + idx;
380 }
381 /*
382 * update our shadow (used to minimize EEPROM
383 * traffic), to match what we are about to write.
384 */
385 dd->eep_st_errs[idx] = new_val;
386 dd->eep_st_new_errs[idx] = 0;
387 }
388 }
389 /*
390 * Now update active-time. We would like to round to the nearest hour
391 * but unless atomic_t are sure to be proper signed ints we cannot,
392 * because we need to account for what we "transfer" to EEPROM and
393 * if we log an hour at 31 minutes, then we would need to set
394 * active_time to -29 to accurately count the _next_ hour.
395 */
396 if (new_time >= 3600) {
397 new_hrs = new_time / 3600;
398 atomic_sub((new_hrs * 3600), &dd->active_time);
399 new_hrs += dd->eep_hrs;
400 if (new_hrs > 0xFFFF)
401 new_hrs = 0xFFFF;
402 dd->eep_hrs = new_hrs;
403 if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
404 ifp->if_powerhour[0] = new_hrs & 0xFF;
405 hi_water = offsetof(struct qib_flash, if_powerhour);
406 }
407 if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
408 ifp->if_powerhour[1] = new_hrs >> 8;
409 hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
410 }
411 }
412 /*
413 * There is a tiny possibility that we could somehow fail to write
414 * the EEPROM after updating our shadows, but problems from holding
415 * the spinlock too long are a much bigger issue.
416 */
417 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
418 if (hi_water) {
 419 /* we made some change to the data, update cksum and write */
420 csum = flash_csum(ifp, 1);
421 ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
422 }
423 mutex_unlock(&dd->eep_lock);
424 if (ret)
425 qib_dev_err(dd, "Failed updating EEPROM\n");
426
427free_bail:
428 vfree(buf);
429bail:
430 return ret;
431}
432
433/**
434 * qib_inc_eeprom_err - increment one of the four error counters
435 * that are logged to EEPROM.
436 * @dd: the qlogic_ib device
437 * @eidx: 0..3, the counter to increment
438 * @incr: how much to add
439 *
440 * Each counter is 8-bits, and saturates at 255 (0xFF). They
441 * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
442 * is called, but it can only be called in a context that allows sleep.
443 * This function can be called even at interrupt level.
444 */
445void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
446{
447 uint new_val;
448 unsigned long flags;
449
450 spin_lock_irqsave(&dd->eep_st_lock, flags);
451 new_val = dd->eep_st_new_errs[eidx] + incr;
452 if (new_val > 255)
453 new_val = 255;
454 dd->eep_st_new_errs[eidx] = new_val;
455 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
456}
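The deleted qib_update_eeprom_log() performed two bits of arithmetic worth recording: error counts saturate at 0xFF to fit the 8-bit EEPROM fields, and active time accumulates in seconds but is logged in whole hours, with only the logged hours subtracted so the remainder keeps counting toward the next hour. A standalone sketch of both, using userspace types and hypothetical names:

#include <stdint.h>

/* Saturating add into an 8-bit EEPROM counter. */
static uint8_t sat_add8(uint8_t cur, unsigned int incr)
{
	unsigned int v = cur + incr;

	return v > 0xFF ? 0xFF : v;
}

/* Fold whole hours out of a seconds accumulator, saturating the
 * 16-bit hour total; the leftover seconds keep accruing. */
static uint16_t log_hours(uint32_t *active_secs, uint16_t eep_hrs)
{
	uint32_t hrs = *active_secs / 3600;

	*active_secs -= hrs * 3600;
	hrs += eep_hrs;
	return hrs > 0xFFFF ? 0xFFFF : hrs;
}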
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index b15e34eeef68..41937c6f888a 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -351,9 +351,10 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
351 * unless perhaps the user has mpin'ed the pages 351 * unless perhaps the user has mpin'ed the pages
352 * themselves. 352 * themselves.
353 */ 353 */
354 qib_devinfo(dd->pcidev, 354 qib_devinfo(
355 "Failed to lock addr %p, %u pages: " 355 dd->pcidev,
356 "errno %d\n", (void *) vaddr, cnt, -ret); 356 "Failed to lock addr %p, %u pages: errno %d\n",
357 (void *) vaddr, cnt, -ret);
357 goto done; 358 goto done;
358 } 359 }
359 for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) { 360 for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
@@ -437,7 +438,7 @@ cleanup:
437 goto cleanup; 438 goto cleanup;
438 } 439 }
439 if (copy_to_user((void __user *) (unsigned long) ti->tidmap, 440 if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
440 tidmap, sizeof tidmap)) { 441 tidmap, sizeof(tidmap))) {
441 ret = -EFAULT; 442 ret = -EFAULT;
442 goto cleanup; 443 goto cleanup;
443 } 444 }
@@ -484,7 +485,7 @@ static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
484 } 485 }
485 486
486 if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap, 487 if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
487 sizeof tidmap)) { 488 sizeof(tidmap))) {
488 ret = -EFAULT; 489 ret = -EFAULT;
489 goto done; 490 goto done;
490 } 491 }
@@ -951,8 +952,8 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
951 /* rcvegrbufs are read-only on the slave */ 952 /* rcvegrbufs are read-only on the slave */
952 if (vma->vm_flags & VM_WRITE) { 953 if (vma->vm_flags & VM_WRITE) {
953 qib_devinfo(dd->pcidev, 954 qib_devinfo(dd->pcidev,
954 "Can't map eager buffers as " 955 "Can't map eager buffers as writable (flags=%lx)\n",
955 "writable (flags=%lx)\n", vma->vm_flags); 956 vma->vm_flags);
956 ret = -EPERM; 957 ret = -EPERM;
957 goto bail; 958 goto bail;
958 } 959 }
@@ -1185,6 +1186,7 @@ static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
1185 */ 1186 */
1186 if (weight >= qib_cpulist_count) { 1187 if (weight >= qib_cpulist_count) {
1187 int cpu; 1188 int cpu;
1189
1188 cpu = find_first_zero_bit(qib_cpulist, 1190 cpu = find_first_zero_bit(qib_cpulist,
1189 qib_cpulist_count); 1191 qib_cpulist_count);
1190 if (cpu == qib_cpulist_count) 1192 if (cpu == qib_cpulist_count)
@@ -1247,10 +1249,7 @@ static int init_subctxts(struct qib_devdata *dd,
1247 if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16, 1249 if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
1248 uinfo->spu_userversion & 0xffff)) { 1250 uinfo->spu_userversion & 0xffff)) {
1249 qib_devinfo(dd->pcidev, 1251 qib_devinfo(dd->pcidev,
1250 "Mismatched user version (%d.%d) and driver " 1252 "Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
1251 "version (%d.%d) while context sharing. Ensure "
1252 "that driver and library are from the same "
1253 "release.\n",
1254 (int) (uinfo->spu_userversion >> 16), 1253 (int) (uinfo->spu_userversion >> 16),
1255 (int) (uinfo->spu_userversion & 0xffff), 1254 (int) (uinfo->spu_userversion & 0xffff),
1256 QIB_USER_SWMAJOR, QIB_USER_SWMINOR); 1255 QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
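Re-joining the split format strings here follows the kernel rule that user-visible strings stay on one line even when that exceeds 80 columns, so a message seen in the log can be found with grep. A contrast sketch with a shortened message:

/* Preferred: one grep-able literal, long line tolerated. */
pr_err("Mismatched user version while context sharing\n");

/* Avoided: grepping for the full message now finds nothing. */
pr_err("Mismatched user "
       "version while context sharing\n");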
@@ -1391,6 +1390,7 @@ static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
1391 } 1390 }
1392 if (!ppd) { 1391 if (!ppd) {
1393 u32 pidx = ctxt % dd->num_pports; 1392 u32 pidx = ctxt % dd->num_pports;
1393
1394 if (usable(dd->pport + pidx)) 1394 if (usable(dd->pport + pidx))
1395 ppd = dd->pport + pidx; 1395 ppd = dd->pport + pidx;
1396 else { 1396 else {
@@ -1438,10 +1438,12 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
1438 1438
1439 if (alg == QIB_PORT_ALG_ACROSS) { 1439 if (alg == QIB_PORT_ALG_ACROSS) {
1440 unsigned inuse = ~0U; 1440 unsigned inuse = ~0U;
1441
1441 /* find device (with ACTIVE ports) with fewest ctxts in use */ 1442 /* find device (with ACTIVE ports) with fewest ctxts in use */
1442 for (ndev = 0; ndev < devmax; ndev++) { 1443 for (ndev = 0; ndev < devmax; ndev++) {
1443 struct qib_devdata *dd = qib_lookup(ndev); 1444 struct qib_devdata *dd = qib_lookup(ndev);
1444 unsigned cused = 0, cfree = 0, pusable = 0; 1445 unsigned cused = 0, cfree = 0, pusable = 0;
1446
1445 if (!dd) 1447 if (!dd)
1446 continue; 1448 continue;
1447 if (port && port <= dd->num_pports && 1449 if (port && port <= dd->num_pports &&
@@ -1471,6 +1473,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
1471 } else { 1473 } else {
1472 for (ndev = 0; ndev < devmax; ndev++) { 1474 for (ndev = 0; ndev < devmax; ndev++) {
1473 struct qib_devdata *dd = qib_lookup(ndev); 1475 struct qib_devdata *dd = qib_lookup(ndev);
1476
1474 if (dd) { 1477 if (dd) {
1475 ret = choose_port_ctxt(fp, dd, port, uinfo); 1478 ret = choose_port_ctxt(fp, dd, port, uinfo);
1476 if (!ret) 1479 if (!ret)
@@ -1556,6 +1559,7 @@ static int find_hca(unsigned int cpu, int *unit)
1556 } 1559 }
1557 for (ndev = 0; ndev < devmax; ndev++) { 1560 for (ndev = 0; ndev < devmax; ndev++) {
1558 struct qib_devdata *dd = qib_lookup(ndev); 1561 struct qib_devdata *dd = qib_lookup(ndev);
1562
1559 if (dd) { 1563 if (dd) {
1560 if (pcibus_to_node(dd->pcidev->bus) < 0) { 1564 if (pcibus_to_node(dd->pcidev->bus) < 0) {
1561 ret = -EINVAL; 1565 ret = -EINVAL;
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 81854586c081..650897a8591e 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -106,7 +106,7 @@ static ssize_t driver_stats_read(struct file *file, char __user *buf,
106{ 106{
107 qib_stats.sps_ints = qib_sps_ints(); 107 qib_stats.sps_ints = qib_sps_ints();
108 return simple_read_from_buffer(buf, count, ppos, &qib_stats, 108 return simple_read_from_buffer(buf, count, ppos, &qib_stats,
109 sizeof qib_stats); 109 sizeof(qib_stats));
110} 110}
111 111
112/* 112/*
@@ -133,7 +133,7 @@ static ssize_t driver_names_read(struct file *file, char __user *buf,
133 size_t count, loff_t *ppos) 133 size_t count, loff_t *ppos)
134{ 134{
135 return simple_read_from_buffer(buf, count, ppos, qib_statnames, 135 return simple_read_from_buffer(buf, count, ppos, qib_statnames,
136 sizeof qib_statnames - 1); /* no null */ 136 sizeof(qib_statnames) - 1); /* no null */
137} 137}
138 138
139static const struct file_operations driver_ops[] = { 139static const struct file_operations driver_ops[] = {
@@ -379,7 +379,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
379 int ret, i; 379 int ret, i;
380 380
381 /* create the per-unit directory */ 381 /* create the per-unit directory */
382 snprintf(unit, sizeof unit, "%u", dd->unit); 382 snprintf(unit, sizeof(unit), "%u", dd->unit);
383 ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir, 383 ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
384 &simple_dir_operations, dd); 384 &simple_dir_operations, dd);
385 if (ret) { 385 if (ret) {
@@ -455,7 +455,7 @@ static int remove_file(struct dentry *parent, char *name)
455 } 455 }
456 456
457 spin_lock(&tmp->d_lock); 457 spin_lock(&tmp->d_lock);
458 if (!(d_unhashed(tmp) && tmp->d_inode)) { 458 if (!d_unhashed(tmp) && tmp->d_inode) {
459 __d_drop(tmp); 459 __d_drop(tmp);
460 spin_unlock(&tmp->d_lock); 460 spin_unlock(&tmp->d_lock);
461 simple_unlink(parent->d_inode, tmp); 461 simple_unlink(parent->d_inode, tmp);
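Note that the remove_file() hunk above changes behavior, not just style: !(A && B) equals (!A || !B), so the old test also took the unlink path for a hashed dentry whose inode was NULL, while the new test requires a hashed dentry that has an inode. A userspace truth-table sketch makes the difference visible:

#include <stdio.h>

int main(void)
{
	for (int unhashed = 0; unhashed <= 1; unhashed++)
		for (int has_inode = 0; has_inode <= 1; has_inode++)
			printf("unhashed=%d inode=%d old=%d new=%d\n",
			       unhashed, has_inode,
			       !(unhashed && has_inode),	/* old test */
			       !unhashed && has_inode);		/* new test */
	return 0;
}

The two conditions disagree exactly when has_inode is 0, i.e. for negative dentries.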
@@ -482,7 +482,7 @@ static int remove_device_files(struct super_block *sb,
482 482
483 root = dget(sb->s_root); 483 root = dget(sb->s_root);
484 mutex_lock(&root->d_inode->i_mutex); 484 mutex_lock(&root->d_inode->i_mutex);
485 snprintf(unit, sizeof unit, "%u", dd->unit); 485 snprintf(unit, sizeof(unit), "%u", dd->unit);
486 dir = lookup_one_len(unit, root, strlen(unit)); 486 dir = lookup_one_len(unit, root, strlen(unit));
487 487
488 if (IS_ERR(dir)) { 488 if (IS_ERR(dir)) {
@@ -560,6 +560,7 @@ static struct dentry *qibfs_mount(struct file_system_type *fs_type, int flags,
560 const char *dev_name, void *data) 560 const char *dev_name, void *data)
561{ 561{
562 struct dentry *ret; 562 struct dentry *ret;
563
563 ret = mount_single(fs_type, flags, data, qibfs_fill_super); 564 ret = mount_single(fs_type, flags, data, qibfs_fill_super);
564 if (!IS_ERR(ret)) 565 if (!IS_ERR(ret))
565 qib_super = ret->d_sb; 566 qib_super = ret->d_sb;
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index d68266ac7619..0d2ba59af30a 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -333,6 +333,7 @@ static inline void qib_write_ureg(const struct qib_devdata *dd,
333 enum qib_ureg regno, u64 value, int ctxt) 333 enum qib_ureg regno, u64 value, int ctxt)
334{ 334{
335 u64 __iomem *ubase; 335 u64 __iomem *ubase;
336
336 if (dd->userbase) 337 if (dd->userbase)
337 ubase = (u64 __iomem *) 338 ubase = (u64 __iomem *)
338 ((char __iomem *) dd->userbase + 339 ((char __iomem *) dd->userbase +
@@ -834,14 +835,14 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
834 bits = (u32) ((hwerrs >> 835 bits = (u32) ((hwerrs >>
835 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) & 836 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
836 QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK); 837 QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
837 snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, 838 snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
838 "[PCIe Mem Parity Errs %x] ", bits); 839 "[PCIe Mem Parity Errs %x] ", bits);
839 strlcat(msg, bitsmsg, msgl); 840 strlcat(msg, bitsmsg, msgl);
840 } 841 }
841 842
842 if (hwerrs & _QIB_PLL_FAIL) { 843 if (hwerrs & _QIB_PLL_FAIL) {
843 isfatal = 1; 844 isfatal = 1;
844 snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, 845 snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
845 "[PLL failed (%llx), InfiniPath hardware unusable]", 846 "[PLL failed (%llx), InfiniPath hardware unusable]",
846 (unsigned long long) hwerrs & _QIB_PLL_FAIL); 847 (unsigned long long) hwerrs & _QIB_PLL_FAIL);
847 strlcat(msg, bitsmsg, msgl); 848 strlcat(msg, bitsmsg, msgl);
@@ -1014,7 +1015,7 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
1014 1015
1015 /* do these first, they are most important */ 1016 /* do these first, they are most important */
1016 if (errs & ERR_MASK(HardwareErr)) 1017 if (errs & ERR_MASK(HardwareErr))
1017 qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); 1018 qib_handle_6120_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1018 else 1019 else
1019 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) 1020 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1020 if (errs & dd->eep_st_masks[log_idx].errs_to_log) 1021 if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1062,7 +1063,7 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
1062 */ 1063 */
1063 mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) | 1064 mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
1064 ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr); 1065 ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
1065 qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask); 1066 qib_decode_6120_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
1066 1067
1067 if (errs & E_SUM_PKTERRS) 1068 if (errs & E_SUM_PKTERRS)
1068 qib_stats.sps_rcverrs++; 1069 qib_stats.sps_rcverrs++;
@@ -1670,6 +1671,7 @@ static irqreturn_t qib_6120intr(int irq, void *data)
1670 } 1671 }
1671 if (crcs) { 1672 if (crcs) {
1672 u32 cntr = dd->cspec->lli_counter; 1673 u32 cntr = dd->cspec->lli_counter;
1674
1673 cntr += crcs; 1675 cntr += crcs;
1674 if (cntr) { 1676 if (cntr) {
1675 if (cntr > dd->cspec->lli_thresh) { 1677 if (cntr > dd->cspec->lli_thresh) {
@@ -1722,6 +1724,7 @@ static void qib_setup_6120_interrupt(struct qib_devdata *dd)
1722 "irq is 0, BIOS error? Interrupts won't work\n"); 1724 "irq is 0, BIOS error? Interrupts won't work\n");
1723 else { 1725 else {
1724 int ret; 1726 int ret;
1727
1725 ret = request_irq(dd->cspec->irq, qib_6120intr, 0, 1728 ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
1726 QIB_DRV_NAME, dd); 1729 QIB_DRV_NAME, dd);
1727 if (ret) 1730 if (ret)
@@ -2681,8 +2684,6 @@ static void qib_get_6120_faststats(unsigned long opaque)
2681 spin_lock_irqsave(&dd->eep_st_lock, flags); 2684 spin_lock_irqsave(&dd->eep_st_lock, flags);
2682 traffic_wds -= dd->traffic_wds; 2685 traffic_wds -= dd->traffic_wds;
2683 dd->traffic_wds += traffic_wds; 2686 dd->traffic_wds += traffic_wds;
2684 if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
2685 atomic_add(5, &dd->active_time); /* S/B #define */
2686 spin_unlock_irqrestore(&dd->eep_st_lock, flags); 2687 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
2687 2688
2688 qib_chk_6120_errormask(dd); 2689 qib_chk_6120_errormask(dd);
@@ -2929,6 +2930,7 @@ bail:
2929static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what) 2930static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
2930{ 2931{
2931 int ret = 0; 2932 int ret = 0;
2933
2932 if (!strncmp(what, "ibc", 3)) { 2934 if (!strncmp(what, "ibc", 3)) {
2933 ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback); 2935 ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
2934 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n", 2936 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
@@ -3170,6 +3172,7 @@ static void get_6120_chip_params(struct qib_devdata *dd)
3170static void set_6120_baseaddrs(struct qib_devdata *dd) 3172static void set_6120_baseaddrs(struct qib_devdata *dd)
3171{ 3173{
3172 u32 cregbase; 3174 u32 cregbase;
3175
3173 cregbase = qib_read_kreg32(dd, kr_counterregbase); 3176 cregbase = qib_read_kreg32(dd, kr_counterregbase);
3174 dd->cspec->cregbase = (u64 __iomem *) 3177 dd->cspec->cregbase = (u64 __iomem *)
3175 ((char __iomem *) dd->kregbase + cregbase); 3178 ((char __iomem *) dd->kregbase + cregbase);
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 7dec89fdc124..22affda8af88 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -902,7 +902,8 @@ static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
902 errs &= QLOGIC_IB_E_SDMAERRS; 902 errs &= QLOGIC_IB_E_SDMAERRS;
903 903
904 msg = dd->cspec->sdmamsgbuf; 904 msg = dd->cspec->sdmamsgbuf;
905 qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf); 905 qib_decode_7220_sdma_errs(ppd, errs, msg,
906 sizeof(dd->cspec->sdmamsgbuf));
906 spin_lock_irqsave(&ppd->sdma_lock, flags); 907 spin_lock_irqsave(&ppd->sdma_lock, flags);
907 908
908 if (errs & ERR_MASK(SendBufMisuseErr)) { 909 if (errs & ERR_MASK(SendBufMisuseErr)) {
@@ -1043,6 +1044,7 @@ done:
1043static void reenable_7220_chase(unsigned long opaque) 1044static void reenable_7220_chase(unsigned long opaque)
1044{ 1045{
1045 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; 1046 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
1047
1046 ppd->cpspec->chase_timer.expires = 0; 1048 ppd->cpspec->chase_timer.expires = 0;
1047 qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN, 1049 qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1048 QLOGIC_IB_IBCC_LINKINITCMD_POLL); 1050 QLOGIC_IB_IBCC_LINKINITCMD_POLL);
@@ -1101,7 +1103,7 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
1101 1103
1102 /* do these first, they are most important */ 1104 /* do these first, they are most important */
1103 if (errs & ERR_MASK(HardwareErr)) 1105 if (errs & ERR_MASK(HardwareErr))
1104 qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); 1106 qib_7220_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1105 else 1107 else
1106 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) 1108 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1107 if (errs & dd->eep_st_masks[log_idx].errs_to_log) 1109 if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1155,7 +1157,7 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
1155 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | 1157 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
1156 ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr); 1158 ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);
1157 1159
1158 qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask); 1160 qib_decode_7220_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
1159 1161
1160 if (errs & E_SUM_PKTERRS) 1162 if (errs & E_SUM_PKTERRS)
1161 qib_stats.sps_rcverrs++; 1163 qib_stats.sps_rcverrs++;
@@ -1380,7 +1382,7 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
1380 bits = (u32) ((hwerrs >> 1382 bits = (u32) ((hwerrs >>
1381 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) & 1383 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
1382 QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK); 1384 QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
1383 snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, 1385 snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
1384 "[PCIe Mem Parity Errs %x] ", bits); 1386 "[PCIe Mem Parity Errs %x] ", bits);
1385 strlcat(msg, bitsmsg, msgl); 1387 strlcat(msg, bitsmsg, msgl);
1386 } 1388 }
@@ -1390,7 +1392,7 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
1390 1392
1391 if (hwerrs & _QIB_PLL_FAIL) { 1393 if (hwerrs & _QIB_PLL_FAIL) {
1392 isfatal = 1; 1394 isfatal = 1;
1393 snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, 1395 snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
1394 "[PLL failed (%llx), InfiniPath hardware unusable]", 1396 "[PLL failed (%llx), InfiniPath hardware unusable]",
1395 (unsigned long long) hwerrs & _QIB_PLL_FAIL); 1397 (unsigned long long) hwerrs & _QIB_PLL_FAIL);
1396 strlcat(msg, bitsmsg, msgl); 1398 strlcat(msg, bitsmsg, msgl);
@@ -3297,8 +3299,6 @@ static void qib_get_7220_faststats(unsigned long opaque)
3297 spin_lock_irqsave(&dd->eep_st_lock, flags); 3299 spin_lock_irqsave(&dd->eep_st_lock, flags);
3298 traffic_wds -= dd->traffic_wds; 3300 traffic_wds -= dd->traffic_wds;
3299 dd->traffic_wds += traffic_wds; 3301 dd->traffic_wds += traffic_wds;
3300 if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
3301 atomic_add(5, &dd->active_time); /* S/B #define */
3302 spin_unlock_irqrestore(&dd->eep_st_lock, flags); 3302 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
3303done: 3303done:
3304 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); 3304 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index a7eb32517a04..ef97b71c8f7d 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -117,7 +117,7 @@ MODULE_PARM_DESC(chase, "Enable state chase handling");
117 117
118static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */ 118static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
119module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO); 119module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
120MODULE_PARM_DESC(long_attenuation, \ 120MODULE_PARM_DESC(long_attenuation,
121 "attenuation cutoff (dB) for long copper cable setup"); 121 "attenuation cutoff (dB) for long copper cable setup");
122 122
123static ushort qib_singleport; 123static ushort qib_singleport;
@@ -153,11 +153,12 @@ static struct kparam_string kp_txselect = {
153static int setup_txselect(const char *, struct kernel_param *); 153static int setup_txselect(const char *, struct kernel_param *);
154module_param_call(txselect, setup_txselect, param_get_string, 154module_param_call(txselect, setup_txselect, param_get_string,
155 &kp_txselect, S_IWUSR | S_IRUGO); 155 &kp_txselect, S_IWUSR | S_IRUGO);
156MODULE_PARM_DESC(txselect, \ 156MODULE_PARM_DESC(txselect,
157 "Tx serdes indices (for no QSFP or invalid QSFP data)"); 157 "Tx serdes indices (for no QSFP or invalid QSFP data)");
158 158
159#define BOARD_QME7342 5 159#define BOARD_QME7342 5
160#define BOARD_QMH7342 6 160#define BOARD_QMH7342 6
161#define BOARD_QMH7360 9
161#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ 162#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
162 BOARD_QMH7342) 163 BOARD_QMH7342)
163#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ 164#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
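The backslashes dropped from the two MODULE_PARM_DESC() calls were never needed: a macro invocation's argument list continues across newlines until its closing parenthesis, so the continuation character was pure noise. Both of these are identical to the preprocessor:

MODULE_PARM_DESC(long_attenuation,
	"attenuation cutoff (dB) for long copper cable setup");

MODULE_PARM_DESC(long_attenuation, \
	"attenuation cutoff (dB) for long copper cable setup");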
@@ -817,6 +818,7 @@ static inline void qib_write_ureg(const struct qib_devdata *dd,
817 enum qib_ureg regno, u64 value, int ctxt) 818 enum qib_ureg regno, u64 value, int ctxt)
818{ 819{
819 u64 __iomem *ubase; 820 u64 __iomem *ubase;
821
820 if (dd->userbase) 822 if (dd->userbase)
821 ubase = (u64 __iomem *) 823 ubase = (u64 __iomem *)
822 ((char __iomem *) dd->userbase + 824 ((char __iomem *) dd->userbase +
@@ -1677,7 +1679,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
1677 /* do these first, they are most important */ 1679 /* do these first, they are most important */
1678 if (errs & QIB_E_HARDWARE) { 1680 if (errs & QIB_E_HARDWARE) {
1679 *msg = '\0'; 1681 *msg = '\0';
1680 qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); 1682 qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1681 } else 1683 } else
1682 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) 1684 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1683 if (errs & dd->eep_st_masks[log_idx].errs_to_log) 1685 if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1702,7 +1704,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
1702 mask = QIB_E_HARDWARE; 1704 mask = QIB_E_HARDWARE;
1703 *msg = '\0'; 1705 *msg = '\0';
1704 1706
1705 err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask, 1707 err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
1706 qib_7322error_msgs); 1708 qib_7322error_msgs);
1707 1709
1708 /* 1710 /*
@@ -1889,10 +1891,10 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1889 *msg = '\0'; 1891 *msg = '\0';
1890 1892
1891 if (errs & ~QIB_E_P_BITSEXTANT) { 1893 if (errs & ~QIB_E_P_BITSEXTANT) {
1892 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, 1894 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1893 errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs); 1895 errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1894 if (!*msg) 1896 if (!*msg)
1895 snprintf(msg, sizeof ppd->cpspec->epmsgbuf, 1897 snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
1896 "no others"); 1898 "no others");
1897 qib_dev_porterr(dd, ppd->port, 1899 qib_dev_porterr(dd, ppd->port,
1898 "error interrupt with unknown errors 0x%016Lx set (and %s)\n", 1900 "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
@@ -1906,7 +1908,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1906 /* determine cause, then write to clear */ 1908 /* determine cause, then write to clear */
1907 symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom); 1909 symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1908 qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0); 1910 qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1909 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom, 1911 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
1910 hdrchk_msgs); 1912 hdrchk_msgs);
1911 *msg = '\0'; 1913 *msg = '\0';
1912 /* senderrbuf cleared in SPKTERRS below */ 1914 /* senderrbuf cleared in SPKTERRS below */
@@ -1922,7 +1924,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1922 * isn't valid. We don't want to confuse people, so 1924 * isn't valid. We don't want to confuse people, so
1923 * we just don't print them, except at debug 1925 * we just don't print them, except at debug
1924 */ 1926 */
1925 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, 1927 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1926 (errs & QIB_E_P_LINK_PKTERRS), 1928 (errs & QIB_E_P_LINK_PKTERRS),
1927 qib_7322p_error_msgs); 1929 qib_7322p_error_msgs);
1928 *msg = '\0'; 1930 *msg = '\0';
@@ -1938,7 +1940,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1938 * valid. We don't want to confuse people, so we just 1940 * valid. We don't want to confuse people, so we just
1939 * don't print them, except at debug 1941 * don't print them, except at debug
1940 */ 1942 */
1941 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs, 1943 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
1942 qib_7322p_error_msgs); 1944 qib_7322p_error_msgs);
1943 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS; 1945 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1944 *msg = '\0'; 1946 *msg = '\0';
@@ -2031,6 +2033,7 @@ static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
2031 if (dd->cspec->num_msix_entries) { 2033 if (dd->cspec->num_msix_entries) {
2032 /* and same for MSIx */ 2034 /* and same for MSIx */
2033 u64 val = qib_read_kreg64(dd, kr_intgranted); 2035 u64 val = qib_read_kreg64(dd, kr_intgranted);
2036
2034 if (val) 2037 if (val)
2035 qib_write_kreg(dd, kr_intgranted, val); 2038 qib_write_kreg(dd, kr_intgranted, val);
2036 } 2039 }
@@ -2176,6 +2179,7 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2176 int err; 2179 int err;
2177 unsigned long flags; 2180 unsigned long flags;
2178 struct qib_pportdata *ppd = dd->pport; 2181 struct qib_pportdata *ppd = dd->pport;
2182
2179 for (; pidx < dd->num_pports; ++pidx, ppd++) { 2183 for (; pidx < dd->num_pports; ++pidx, ppd++) {
2180 err = 0; 2184 err = 0;
2181 if (pidx == 0 && (hwerrs & 2185 if (pidx == 0 && (hwerrs &
@@ -2801,9 +2805,11 @@ static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
2801 2805
2802 if (n->rcv) { 2806 if (n->rcv) {
2803 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; 2807 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2808
2804 qib_update_rhdrq_dca(rcd, cpu); 2809 qib_update_rhdrq_dca(rcd, cpu);
2805 } else { 2810 } else {
2806 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; 2811 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2812
2807 qib_update_sdma_dca(ppd, cpu); 2813 qib_update_sdma_dca(ppd, cpu);
2808 } 2814 }
2809} 2815}
@@ -2816,9 +2822,11 @@ static void qib_irq_notifier_release(struct kref *ref)
2816 2822
2817 if (n->rcv) { 2823 if (n->rcv) {
2818 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; 2824 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2825
2819 dd = rcd->dd; 2826 dd = rcd->dd;
2820 } else { 2827 } else {
2821 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; 2828 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2829
2822 dd = ppd->dd; 2830 dd = ppd->dd;
2823 } 2831 }
2824 qib_devinfo(dd->pcidev, 2832 qib_devinfo(dd->pcidev,
@@ -2994,6 +3002,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2994 struct qib_pportdata *ppd; 3002 struct qib_pportdata *ppd;
2995 struct qib_qsfp_data *qd; 3003 struct qib_qsfp_data *qd;
2996 u32 mask; 3004 u32 mask;
3005
2997 if (!dd->pport[pidx].link_speed_supported) 3006 if (!dd->pport[pidx].link_speed_supported)
2998 continue; 3007 continue;
2999 mask = QSFP_GPIO_MOD_PRS_N; 3008 mask = QSFP_GPIO_MOD_PRS_N;
@@ -3001,6 +3010,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
3001 mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx); 3010 mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
3002 if (gpiostatus & dd->cspec->gpio_mask & mask) { 3011 if (gpiostatus & dd->cspec->gpio_mask & mask) {
3003 u64 pins; 3012 u64 pins;
3013
3004 qd = &ppd->cpspec->qsfp_data; 3014 qd = &ppd->cpspec->qsfp_data;
3005 gpiostatus &= ~mask; 3015 gpiostatus &= ~mask;
3006 pins = qib_read_kreg64(dd, kr_extstatus); 3016 pins = qib_read_kreg64(dd, kr_extstatus);
@@ -3442,7 +3452,7 @@ try_intx:
3442 } 3452 }
3443 3453
3444 /* Try to get MSIx interrupts */ 3454 /* Try to get MSIx interrupts */
3445 memset(redirect, 0, sizeof redirect); 3455 memset(redirect, 0, sizeof(redirect));
3446 mask = ~0ULL; 3456 mask = ~0ULL;
3447 msixnum = 0; 3457 msixnum = 0;
3448 local_mask = cpumask_of_pcibus(dd->pcidev->bus); 3458 local_mask = cpumask_of_pcibus(dd->pcidev->bus);
@@ -3617,6 +3627,10 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
3617 n = "InfiniPath_QME7362"; 3627 n = "InfiniPath_QME7362";
3618 dd->flags |= QIB_HAS_QSFP; 3628 dd->flags |= QIB_HAS_QSFP;
3619 break; 3629 break;
3630 case BOARD_QMH7360:
3631 n = "Intel IB QDR 1P FLR-QSFP Adptr";
3632 dd->flags |= QIB_HAS_QSFP;
3633 break;
3620 case 15: 3634 case 15:
3621 n = "InfiniPath_QLE7342_TEST"; 3635 n = "InfiniPath_QLE7342_TEST";
3622 dd->flags |= QIB_HAS_QSFP; 3636 dd->flags |= QIB_HAS_QSFP;
@@ -3694,6 +3708,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
3694 */ 3708 */
3695 for (i = 0; i < msix_entries; i++) { 3709 for (i = 0; i < msix_entries; i++) {
3696 u64 vecaddr, vecdata; 3710 u64 vecaddr, vecdata;
3711
3697 vecaddr = qib_read_kreg64(dd, 2 * i + 3712 vecaddr = qib_read_kreg64(dd, 2 * i +
3698 (QIB_7322_MsixTable_OFFS / sizeof(u64))); 3713 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3699 vecdata = qib_read_kreg64(dd, 1 + 2 * i + 3714 vecdata = qib_read_kreg64(dd, 1 + 2 * i +
@@ -5178,8 +5193,6 @@ static void qib_get_7322_faststats(unsigned long opaque)
5178 spin_lock_irqsave(&ppd->dd->eep_st_lock, flags); 5193 spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
5179 traffic_wds -= ppd->dd->traffic_wds; 5194 traffic_wds -= ppd->dd->traffic_wds;
5180 ppd->dd->traffic_wds += traffic_wds; 5195 ppd->dd->traffic_wds += traffic_wds;
5181 if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
5182 atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
5183 spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags); 5196 spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
5184 if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active & 5197 if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
5185 QIB_IB_QDR) && 5198 QIB_IB_QDR) &&
@@ -5357,6 +5370,7 @@ static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5357static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed) 5370static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5358{ 5371{
5359 u64 newctrlb; 5372 u64 newctrlb;
5373
5360 newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK | 5374 newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5361 IBA7322_IBC_IBTA_1_2_MASK | 5375 IBA7322_IBC_IBTA_1_2_MASK |
5362 IBA7322_IBC_MAX_SPEED_MASK); 5376 IBA7322_IBC_MAX_SPEED_MASK);
@@ -5843,6 +5857,7 @@ static void get_7322_chip_params(struct qib_devdata *dd)
5843static void qib_7322_set_baseaddrs(struct qib_devdata *dd) 5857static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5844{ 5858{
5845 u32 cregbase; 5859 u32 cregbase;
5860
5846 cregbase = qib_read_kreg32(dd, kr_counterregbase); 5861 cregbase = qib_read_kreg32(dd, kr_counterregbase);
5847 5862
5848 dd->cspec->cregbase = (u64 __iomem *)(cregbase + 5863 dd->cspec->cregbase = (u64 __iomem *)(cregbase +
@@ -6183,6 +6198,7 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
6183 struct qib_devdata *dd; 6198 struct qib_devdata *dd;
6184 unsigned long val; 6199 unsigned long val;
6185 char *n; 6200 char *n;
6201
6186 if (strlen(str) >= MAX_ATTEN_LEN) { 6202 if (strlen(str) >= MAX_ATTEN_LEN) {
6187 pr_info("txselect_values string too long\n"); 6203 pr_info("txselect_values string too long\n");
6188 return -ENOSPC; 6204 return -ENOSPC;
@@ -6393,6 +6409,7 @@ static void write_7322_initregs(struct qib_devdata *dd)
6393 val = TIDFLOW_ERRBITS; /* these are W1C */ 6409 val = TIDFLOW_ERRBITS; /* these are W1C */
6394 for (i = 0; i < dd->cfgctxts; i++) { 6410 for (i = 0; i < dd->cfgctxts; i++) {
6395 int flow; 6411 int flow;
6412
6396 for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++) 6413 for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6397 qib_write_ureg(dd, ur_rcvflowtable+flow, val, i); 6414 qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6398 } 6415 }
@@ -6503,6 +6520,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
6503 6520
6504 for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) { 6521 for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6505 struct qib_chippport_specific *cp = ppd->cpspec; 6522 struct qib_chippport_specific *cp = ppd->cpspec;
6523
6506 ppd->link_speed_supported = features & PORT_SPD_CAP; 6524 ppd->link_speed_supported = features & PORT_SPD_CAP;
6507 features >>= PORT_SPD_CAP_SHIFT; 6525 features >>= PORT_SPD_CAP_SHIFT;
6508 if (!ppd->link_speed_supported) { 6526 if (!ppd->link_speed_supported) {
@@ -6581,8 +6599,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
6581 ppd->vls_supported = IB_VL_VL0_7; 6599 ppd->vls_supported = IB_VL_VL0_7;
6582 else { 6600 else {
6583 qib_devinfo(dd->pcidev, 6601 qib_devinfo(dd->pcidev,
6584 "Invalid num_vls %u for MTU %d " 6602 "Invalid num_vls %u for MTU %d , using 4 VLs\n",
6585 ", using 4 VLs\n",
6586 qib_num_cfg_vls, mtu); 6603 qib_num_cfg_vls, mtu);
6587 ppd->vls_supported = IB_VL_VL0_3; 6604 ppd->vls_supported = IB_VL_VL0_3;
6588 qib_num_cfg_vls = 4; 6605 qib_num_cfg_vls = 4;
@@ -7890,6 +7907,7 @@ static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7890static int serdes_7322_init(struct qib_pportdata *ppd) 7907static int serdes_7322_init(struct qib_pportdata *ppd)
7891{ 7908{
7892 int ret = 0; 7909 int ret = 0;
7910
7893 if (ppd->dd->cspec->r1) 7911 if (ppd->dd->cspec->r1)
7894 ret = serdes_7322_init_old(ppd); 7912 ret = serdes_7322_init_old(ppd);
7895 else 7913 else
@@ -8305,8 +8323,8 @@ static void force_h1(struct qib_pportdata *ppd)
8305 8323
8306static int qib_r_grab(struct qib_devdata *dd) 8324static int qib_r_grab(struct qib_devdata *dd)
8307{ 8325{
8308 u64 val; 8326 u64 val = SJA_EN;
8309 val = SJA_EN; 8327
8310 qib_write_kreg(dd, kr_r_access, val); 8328 qib_write_kreg(dd, kr_r_access, val);
8311 qib_read_kreg32(dd, kr_scratch); 8329 qib_read_kreg32(dd, kr_scratch);
8312 return 0; 8330 return 0;
@@ -8319,6 +8337,7 @@ static int qib_r_wait_for_rdy(struct qib_devdata *dd)
8319{ 8337{
8320 u64 val; 8338 u64 val;
8321 int timeout; 8339 int timeout;
8340
8322 for (timeout = 0; timeout < 100 ; ++timeout) { 8341 for (timeout = 0; timeout < 100 ; ++timeout) {
8323 val = qib_read_kreg32(dd, kr_r_access); 8342 val = qib_read_kreg32(dd, kr_r_access);
8324 if (val & R_RDY) 8343 if (val & R_RDY)
@@ -8346,6 +8365,7 @@ static int qib_r_shift(struct qib_devdata *dd, int bisten,
8346 } 8365 }
8347 if (inp) { 8366 if (inp) {
8348 int tdi = inp[pos >> 3] >> (pos & 7); 8367 int tdi = inp[pos >> 3] >> (pos & 7);
8368
8349 val |= ((tdi & 1) << R_TDI_LSB); 8369 val |= ((tdi & 1) << R_TDI_LSB);
8350 } 8370 }
8351 qib_write_kreg(dd, kr_r_access, val); 8371 qib_write_kreg(dd, kr_r_access, val);
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 729da39c49ed..2ee36953e234 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -140,7 +140,7 @@ int qib_create_ctxts(struct qib_devdata *dd)
140 * Allocate full ctxtcnt array, rather than just cfgctxts, because 140 * Allocate full ctxtcnt array, rather than just cfgctxts, because
141 * cleanup iterates across all possible ctxts. 141 * cleanup iterates across all possible ctxts.
142 */ 142 */
143 dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL); 143 dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
144 if (!dd->rcd) { 144 if (!dd->rcd) {
145 qib_dev_err(dd, 145 qib_dev_err(dd,
146 "Unable to allocate ctxtdata array, failing\n"); 146 "Unable to allocate ctxtdata array, failing\n");
@@ -234,6 +234,7 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
234 u8 hw_pidx, u8 port) 234 u8 hw_pidx, u8 port)
235{ 235{
236 int size; 236 int size;
237
237 ppd->dd = dd; 238 ppd->dd = dd;
238 ppd->hw_pidx = hw_pidx; 239 ppd->hw_pidx = hw_pidx;
239 ppd->port = port; /* IB port number, not index */ 240 ppd->port = port; /* IB port number, not index */
@@ -613,6 +614,7 @@ static int qib_create_workqueues(struct qib_devdata *dd)
613 ppd = dd->pport + pidx; 614 ppd = dd->pport + pidx;
614 if (!ppd->qib_wq) { 615 if (!ppd->qib_wq) {
615 char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */ 616 char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */
617
616 snprintf(wq_name, sizeof(wq_name), "qib%d_%d", 618 snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
617 dd->unit, pidx); 619 dd->unit, pidx);
618 ppd->qib_wq = 620 ppd->qib_wq =
@@ -714,6 +716,7 @@ int qib_init(struct qib_devdata *dd, int reinit)
714 716
715 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 717 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
716 int mtu; 718 int mtu;
719
717 if (lastfail) 720 if (lastfail)
718 ret = lastfail; 721 ret = lastfail;
719 ppd = dd->pport + pidx; 722 ppd = dd->pport + pidx;
@@ -931,7 +934,6 @@ static void qib_shutdown_device(struct qib_devdata *dd)
931 qib_free_pportdata(ppd); 934 qib_free_pportdata(ppd);
932 } 935 }
933 936
934 qib_update_eeprom_log(dd);
935} 937}
936 938
937/** 939/**
@@ -1026,8 +1028,7 @@ static void qib_verify_pioperf(struct qib_devdata *dd)
1026 addr = vmalloc(cnt); 1028 addr = vmalloc(cnt);
1027 if (!addr) { 1029 if (!addr) {
1028 qib_devinfo(dd->pcidev, 1030 qib_devinfo(dd->pcidev,
1029 "Couldn't get memory for checking PIO perf," 1031 "Couldn't get memory for checking PIO perf, skipping\n");
1030 " skipping\n");
1031 goto done; 1032 goto done;
1032 } 1033 }
1033 1034
@@ -1163,6 +1164,7 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
1163 1164
1164 if (!qib_cpulist_count) { 1165 if (!qib_cpulist_count) {
1165 u32 count = num_online_cpus(); 1166 u32 count = num_online_cpus();
1167
1166 qib_cpulist = kzalloc(BITS_TO_LONGS(count) * 1168 qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
1167 sizeof(long), GFP_KERNEL); 1169 sizeof(long), GFP_KERNEL);
1168 if (qib_cpulist) 1170 if (qib_cpulist)
@@ -1179,7 +1181,7 @@ bail:
1179 if (!list_empty(&dd->list)) 1181 if (!list_empty(&dd->list))
1180 list_del_init(&dd->list); 1182 list_del_init(&dd->list);
1181 ib_dealloc_device(&dd->verbs_dev.ibdev); 1183 ib_dealloc_device(&dd->verbs_dev.ibdev);
1182 return ERR_PTR(ret);; 1184 return ERR_PTR(ret);
1183} 1185}
1184 1186
1185/* 1187/*
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index f4918f2165ec..086616d071b9 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -168,7 +168,6 @@ skip_ibchange:
168 ppd->lastibcstat = ibcs; 168 ppd->lastibcstat = ibcs;
169 if (ev) 169 if (ev)
170 signal_ib_event(ppd, ev); 170 signal_ib_event(ppd, ev);
171 return;
172} 171}
173 172
174void qib_clear_symerror_on_linkup(unsigned long opaque) 173void qib_clear_symerror_on_linkup(unsigned long opaque)
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 3b9afccaaade..ad843c786e72 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -122,10 +122,10 @@ void qib_free_lkey(struct qib_mregion *mr)
122 if (!mr->lkey_published) 122 if (!mr->lkey_published)
123 goto out; 123 goto out;
124 if (lkey == 0) 124 if (lkey == 0)
125 rcu_assign_pointer(dev->dma_mr, NULL); 125 RCU_INIT_POINTER(dev->dma_mr, NULL);
126 else { 126 else {
127 r = lkey >> (32 - ib_qib_lkey_table_size); 127 r = lkey >> (32 - ib_qib_lkey_table_size);
128 rcu_assign_pointer(rkt->table[r], NULL); 128 RCU_INIT_POINTER(rkt->table[r], NULL);
129 } 129 }
130 qib_put_mr(mr); 130 qib_put_mr(mr);
131 mr->lkey_published = 0; 131 mr->lkey_published = 0;
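rcu_assign_pointer() includes the release ordering needed so readers see the pointed-to object fully initialized before they can reach it; storing NULL publishes no data, so RCU_INIT_POINTER() is the documented barrier-free form used above. A sketch of both sides, with gbl_ptr and p->val as hypothetical names:

/* Publish: initialization must be ordered before the pointer store. */
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p) {
	p->val = 42;
	rcu_assign_pointer(gbl_ptr, p);
}

/* Retract: NULL carries no payload, so no barrier is required. */
RCU_INIT_POINTER(gbl_ptr, NULL);
synchronize_rcu();	/* wait for readers before freeing */
kfree(p);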
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 636be117b578..395f4046dba2 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -152,14 +152,14 @@ void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
152 data.trap_num = trap_num; 152 data.trap_num = trap_num;
153 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 153 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
154 data.toggle_count = 0; 154 data.toggle_count = 0;
155 memset(&data.details, 0, sizeof data.details); 155 memset(&data.details, 0, sizeof(data.details));
156 data.details.ntc_257_258.lid1 = lid1; 156 data.details.ntc_257_258.lid1 = lid1;
157 data.details.ntc_257_258.lid2 = lid2; 157 data.details.ntc_257_258.lid2 = lid2;
158 data.details.ntc_257_258.key = cpu_to_be32(key); 158 data.details.ntc_257_258.key = cpu_to_be32(key);
159 data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1); 159 data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
160 data.details.ntc_257_258.qp2 = cpu_to_be32(qp2); 160 data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);
161 161
162 qib_send_trap(ibp, &data, sizeof data); 162 qib_send_trap(ibp, &data, sizeof(data));
163} 163}
164 164
165/* 165/*
@@ -176,7 +176,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
176 data.trap_num = IB_NOTICE_TRAP_BAD_MKEY; 176 data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
177 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 177 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
178 data.toggle_count = 0; 178 data.toggle_count = 0;
179 memset(&data.details, 0, sizeof data.details); 179 memset(&data.details, 0, sizeof(data.details));
180 data.details.ntc_256.lid = data.issuer_lid; 180 data.details.ntc_256.lid = data.issuer_lid;
181 data.details.ntc_256.method = smp->method; 181 data.details.ntc_256.method = smp->method;
182 data.details.ntc_256.attr_id = smp->attr_id; 182 data.details.ntc_256.attr_id = smp->attr_id;
@@ -198,7 +198,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
198 hop_cnt); 198 hop_cnt);
199 } 199 }
200 200
201 qib_send_trap(ibp, &data, sizeof data); 201 qib_send_trap(ibp, &data, sizeof(data));
202} 202}
203 203
204/* 204/*
@@ -214,11 +214,11 @@ void qib_cap_mask_chg(struct qib_ibport *ibp)
214 data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; 214 data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
215 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 215 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
216 data.toggle_count = 0; 216 data.toggle_count = 0;
217 memset(&data.details, 0, sizeof data.details); 217 memset(&data.details, 0, sizeof(data.details));
218 data.details.ntc_144.lid = data.issuer_lid; 218 data.details.ntc_144.lid = data.issuer_lid;
219 data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags); 219 data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
220 220
221 qib_send_trap(ibp, &data, sizeof data); 221 qib_send_trap(ibp, &data, sizeof(data));
222} 222}
223 223
224/* 224/*
@@ -234,11 +234,11 @@ void qib_sys_guid_chg(struct qib_ibport *ibp)
234 data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG; 234 data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
235 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 235 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
236 data.toggle_count = 0; 236 data.toggle_count = 0;
237 memset(&data.details, 0, sizeof data.details); 237 memset(&data.details, 0, sizeof(data.details));
238 data.details.ntc_145.lid = data.issuer_lid; 238 data.details.ntc_145.lid = data.issuer_lid;
239 data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid; 239 data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;
240 240
241 qib_send_trap(ibp, &data, sizeof data); 241 qib_send_trap(ibp, &data, sizeof(data));
242} 242}
243 243
244/* 244/*
@@ -254,12 +254,12 @@ void qib_node_desc_chg(struct qib_ibport *ibp)
254 data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; 254 data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
255 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 255 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
256 data.toggle_count = 0; 256 data.toggle_count = 0;
257 memset(&data.details, 0, sizeof data.details); 257 memset(&data.details, 0, sizeof(data.details));
258 data.details.ntc_144.lid = data.issuer_lid; 258 data.details.ntc_144.lid = data.issuer_lid;
259 data.details.ntc_144.local_changes = 1; 259 data.details.ntc_144.local_changes = 1;
260 data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG; 260 data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;
261 261
262 qib_send_trap(ibp, &data, sizeof data); 262 qib_send_trap(ibp, &data, sizeof(data));
263} 263}
264 264
265static int subn_get_nodedescription(struct ib_smp *smp, 265static int subn_get_nodedescription(struct ib_smp *smp,
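
The qib_mad.c hunks above are purely stylistic: sizeof is an operator, so
sizeof data and sizeof(data) are both legal C and compile to the same
constant, but kernel coding style (and scripts/checkpatch.pl) prefer the
parenthesized form. A minimal sketch, with a hypothetical struct standing
in for the MAD notice data:

struct notice { int trap_num; };

void demo(void)
{
	struct notice data;

	/* same value either way; only the spelling differs */
	_Static_assert(sizeof data == sizeof(data), "identical constants");
	(void)data;
}
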
diff --git a/drivers/infiniband/hw/qib/qib_mmap.c b/drivers/infiniband/hw/qib/qib_mmap.c
index 8b73a11d571c..146cf29a2e1d 100644
--- a/drivers/infiniband/hw/qib/qib_mmap.c
+++ b/drivers/infiniband/hw/qib/qib_mmap.c
@@ -134,7 +134,7 @@ struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev,
134 void *obj) { 134 void *obj) {
135 struct qib_mmap_info *ip; 135 struct qib_mmap_info *ip;
136 136
137 ip = kmalloc(sizeof *ip, GFP_KERNEL); 137 ip = kmalloc(sizeof(*ip), GFP_KERNEL);
138 if (!ip) 138 if (!ip)
139 goto bail; 139 goto bail;
140 140
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
index a77fb4fb14e4..c4473db46699 100644
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -55,7 +55,7 @@ static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
55 55
56 m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; 56 m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
57 for (; i < m; i++) { 57 for (; i < m; i++) {
58 mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL); 58 mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
59 if (!mr->map[i]) 59 if (!mr->map[i])
60 goto bail; 60 goto bail;
61 } 61 }
@@ -104,7 +104,7 @@ struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
104 goto bail; 104 goto bail;
105 } 105 }
106 106
107 mr = kzalloc(sizeof *mr, GFP_KERNEL); 107 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
108 if (!mr) { 108 if (!mr) {
109 ret = ERR_PTR(-ENOMEM); 109 ret = ERR_PTR(-ENOMEM);
110 goto bail; 110 goto bail;
@@ -143,7 +143,7 @@ static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
143 143
144 /* Allocate struct plus pointers to first level page tables. */ 144 /* Allocate struct plus pointers to first level page tables. */
145 m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; 145 m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
146 mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL); 146 mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
147 if (!mr) 147 if (!mr)
148 goto bail; 148 goto bail;
149 149
@@ -347,7 +347,7 @@ qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
347 if (size > PAGE_SIZE) 347 if (size > PAGE_SIZE)
348 return ERR_PTR(-EINVAL); 348 return ERR_PTR(-EINVAL);
349 349
350 pl = kzalloc(sizeof *pl, GFP_KERNEL); 350 pl = kzalloc(sizeof(*pl), GFP_KERNEL);
351 if (!pl) 351 if (!pl)
352 return ERR_PTR(-ENOMEM); 352 return ERR_PTR(-ENOMEM);
353 353
@@ -386,7 +386,7 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
386 386
387 /* Allocate struct plus pointers to first level page tables. */ 387 /* Allocate struct plus pointers to first level page tables. */
388 m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ; 388 m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
389 fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL); 389 fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
390 if (!fmr) 390 if (!fmr)
391 goto bail; 391 goto bail;
392 392
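
alloc_mr() and qib_alloc_fmr() size a single allocation to hold the struct
followed by m first-level map pointers. A kernel-context sketch of the same
pattern with hypothetical names, using a C99 flexible array member:

#include <linux/slab.h>

struct demo_seg;                        /* contents irrelevant here */

struct demo_mr {
	int nmaps;
	struct demo_seg *map[];         /* trailing array, sized at alloc */
};

static struct demo_mr *demo_alloc(int m)
{
	/* one kzalloc covers the struct plus m pointers */
	return kzalloc(sizeof(struct demo_mr) + m * sizeof(struct demo_seg *),
		       GFP_KERNEL);
}
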
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 61a0046efb76..4758a3801ae8 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -210,7 +210,7 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
210 /* We can't pass qib_msix_entry array to qib_msix_setup 210 /* We can't pass qib_msix_entry array to qib_msix_setup
211 * so use a dummy msix_entry array and copy the allocated 211 * so use a dummy msix_entry array and copy the allocated
212 * irq back to the qib_msix_entry array. */ 212 * irq back to the qib_msix_entry array. */
213 msix_entry = kmalloc(nvec * sizeof(*msix_entry), GFP_KERNEL); 213 msix_entry = kcalloc(nvec, sizeof(*msix_entry), GFP_KERNEL);
214 if (!msix_entry) 214 if (!msix_entry)
215 goto do_intx; 215 goto do_intx;
216 216
@@ -234,8 +234,10 @@ free_msix_entry:
234 kfree(msix_entry); 234 kfree(msix_entry);
235 235
236do_intx: 236do_intx:
237 qib_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, " 237 qib_dev_err(
238 "falling back to INTx\n", nvec, ret); 238 dd,
239 "pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n",
240 nvec, ret);
239 *msixcnt = 0; 241 *msixcnt = 0;
240 qib_enable_intx(dd->pcidev); 242 qib_enable_intx(dd->pcidev);
241} 243}
@@ -459,6 +461,7 @@ void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
459void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline) 461void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
460{ 462{
461 int r; 463 int r;
464
462 r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, 465 r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
463 dd->pcibar0); 466 dd->pcibar0);
464 if (r) 467 if (r)
@@ -696,6 +699,7 @@ static void
696qib_pci_resume(struct pci_dev *pdev) 699qib_pci_resume(struct pci_dev *pdev)
697{ 700{
698 struct qib_devdata *dd = pci_get_drvdata(pdev); 701 struct qib_devdata *dd = pci_get_drvdata(pdev);
702
699 qib_devinfo(pdev, "QIB resume function called\n"); 703 qib_devinfo(pdev, "QIB resume function called\n");
700 pci_cleanup_aer_uncorrect_error_status(pdev); 704 pci_cleanup_aer_uncorrect_error_status(pdev);
701 /* 705 /*
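
The kmalloc-to-kcalloc switch in qib_msix_setup() is more than style:
kcalloc(n, size, flags) returns NULL if n * size would overflow instead of
quietly wrapping, and it also zeroes the buffer, which plain kmalloc() did
not. A sketch of the guarded pattern:

	msix_entry = kcalloc(nvec, sizeof(*msix_entry), GFP_KERNEL);
	if (!msix_entry)
		goto do_intx;           /* overflow and OOM both land here */
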
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 6ddc0264aad2..4fa88ba2963e 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -255,10 +255,10 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
255 255
256 if (rcu_dereference_protected(ibp->qp0, 256 if (rcu_dereference_protected(ibp->qp0,
257 lockdep_is_held(&dev->qpt_lock)) == qp) { 257 lockdep_is_held(&dev->qpt_lock)) == qp) {
258 rcu_assign_pointer(ibp->qp0, NULL); 258 RCU_INIT_POINTER(ibp->qp0, NULL);
259 } else if (rcu_dereference_protected(ibp->qp1, 259 } else if (rcu_dereference_protected(ibp->qp1,
260 lockdep_is_held(&dev->qpt_lock)) == qp) { 260 lockdep_is_held(&dev->qpt_lock)) == qp) {
261 rcu_assign_pointer(ibp->qp1, NULL); 261 RCU_INIT_POINTER(ibp->qp1, NULL);
262 } else { 262 } else {
263 struct qib_qp *q; 263 struct qib_qp *q;
264 struct qib_qp __rcu **qpp; 264 struct qib_qp __rcu **qpp;
@@ -269,7 +269,7 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
269 lockdep_is_held(&dev->qpt_lock))) != NULL; 269 lockdep_is_held(&dev->qpt_lock))) != NULL;
270 qpp = &q->next) 270 qpp = &q->next)
271 if (q == qp) { 271 if (q == qp) {
272 rcu_assign_pointer(*qpp, 272 RCU_INIT_POINTER(*qpp,
273 rcu_dereference_protected(qp->next, 273 rcu_dereference_protected(qp->next,
274 lockdep_is_held(&dev->qpt_lock))); 274 lockdep_is_held(&dev->qpt_lock)));
275 removed = 1; 275 removed = 1;
@@ -315,7 +315,7 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
315 for (n = 0; n < dev->qp_table_size; n++) { 315 for (n = 0; n < dev->qp_table_size; n++) {
316 qp = rcu_dereference_protected(dev->qp_table[n], 316 qp = rcu_dereference_protected(dev->qp_table[n],
317 lockdep_is_held(&dev->qpt_lock)); 317 lockdep_is_held(&dev->qpt_lock));
318 rcu_assign_pointer(dev->qp_table[n], NULL); 318 RCU_INIT_POINTER(dev->qp_table[n], NULL);
319 319
320 for (; qp; qp = rcu_dereference_protected(qp->next, 320 for (; qp; qp = rcu_dereference_protected(qp->next,
321 lockdep_is_held(&dev->qpt_lock))) 321 lockdep_is_held(&dev->qpt_lock)))
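
rcu_assign_pointer() includes a release barrier so that a reader who sees
the new pointer also sees the pointee's initialization. When the stored
value is NULL there is no payload to order, so RCU_INIT_POINTER() may skip
the barrier; that is the only difference these hunks exploit. Sketch:

	/* publishing an initialized object: barrier required */
	rcu_assign_pointer(ibp->qp0, qp);

	/* retracting a pointer: NULL carries nothing to order */
	RCU_INIT_POINTER(ibp->qp0, NULL);
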
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index fa71b1e666c5..5e27f76805e2 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -81,7 +81,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
 81 * Module could take up to 2 msec to respond to MOD_SEL, and there 81 * Module could take up to 2 msec to respond to MOD_SEL, and there
82 * is no way to tell if it is ready, so we must wait. 82 * is no way to tell if it is ready, so we must wait.
83 */ 83 */
84 msleep(2); 84 msleep(20);
85 85
86 /* Make sure TWSI bus is in sane state. */ 86 /* Make sure TWSI bus is in sane state. */
87 ret = qib_twsi_reset(dd); 87 ret = qib_twsi_reset(dd);
@@ -99,6 +99,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
99 while (cnt < len) { 99 while (cnt < len) {
100 unsigned in_page; 100 unsigned in_page;
101 int wlen = len - cnt; 101 int wlen = len - cnt;
102
102 in_page = addr % QSFP_PAGESIZE; 103 in_page = addr % QSFP_PAGESIZE;
103 if ((in_page + wlen) > QSFP_PAGESIZE) 104 if ((in_page + wlen) > QSFP_PAGESIZE)
104 wlen = QSFP_PAGESIZE - in_page; 105 wlen = QSFP_PAGESIZE - in_page;
@@ -139,7 +140,7 @@ deselect:
139 else if (pass) 140 else if (pass)
140 qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass); 141 qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
141 142
142 msleep(2); 143 msleep(20);
143 144
144bail: 145bail:
145 mutex_unlock(&dd->eep_lock); 146 mutex_unlock(&dd->eep_lock);
@@ -189,7 +190,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
189 * Module could take up to 2 msec to respond to MOD_SEL, 190 * Module could take up to 2 msec to respond to MOD_SEL,
190 * and there is no way to tell if it is ready, so we must wait. 191 * and there is no way to tell if it is ready, so we must wait.
191 */ 192 */
192 msleep(2); 193 msleep(20);
193 194
194 /* Make sure TWSI bus is in sane state. */ 195 /* Make sure TWSI bus is in sane state. */
195 ret = qib_twsi_reset(dd); 196 ret = qib_twsi_reset(dd);
@@ -206,6 +207,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
206 while (cnt < len) { 207 while (cnt < len) {
207 unsigned in_page; 208 unsigned in_page;
208 int wlen = len - cnt; 209 int wlen = len - cnt;
210
209 in_page = addr % QSFP_PAGESIZE; 211 in_page = addr % QSFP_PAGESIZE;
210 if ((in_page + wlen) > QSFP_PAGESIZE) 212 if ((in_page + wlen) > QSFP_PAGESIZE)
211 wlen = QSFP_PAGESIZE - in_page; 213 wlen = QSFP_PAGESIZE - in_page;
@@ -234,7 +236,7 @@ deselect:
234 * going away, and there is no way to tell if it is ready, 236 * going away, and there is no way to tell if it is ready,
235 * so we must wait. 237 * so we must wait.
236 */ 238 */
237 msleep(2); 239 msleep(20);
238 240
239bail: 241bail:
240 mutex_unlock(&dd->eep_lock); 242 mutex_unlock(&dd->eep_lock);
@@ -296,6 +298,7 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
296 * set the page to zero, even if it already appears to be zero. 298 * set the page to zero, even if it already appears to be zero.
297 */ 299 */
298 u8 poke = 0; 300 u8 poke = 0;
301
299 ret = qib_qsfp_write(ppd, 127, &poke, 1); 302 ret = qib_qsfp_write(ppd, 127, &poke, 1);
300 udelay(50); 303 udelay(50);
301 if (ret != 1) { 304 if (ret != 1) {
@@ -480,7 +483,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
480 udelay(20); /* Generous RST dwell */ 483 udelay(20); /* Generous RST dwell */
481 484
482 dd->f_gpio_mod(dd, mask, mask, mask); 485 dd->f_gpio_mod(dd, mask, mask, mask);
483 return;
484} 486}
485 487
486void qib_qsfp_deinit(struct qib_qsfp_data *qd) 488void qib_qsfp_deinit(struct qib_qsfp_data *qd)
@@ -540,6 +542,7 @@ int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
540 542
541 while (bidx < QSFP_DEFAULT_HDR_CNT) { 543 while (bidx < QSFP_DEFAULT_HDR_CNT) {
542 int iidx; 544 int iidx;
545
543 ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK); 546 ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
544 if (ret < 0) 547 if (ret < 0)
545 goto bail; 548 goto bail;
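
All four msleep(2) calls in qib_qsfp.c become msleep(20) for the same
reason: msleep() is jiffies-based, so per Documentation/timers/timers-howto
requests below roughly 20 ms may sleep far longer than asked (at HZ=100 one
jiffy is already 10 ms). The module needs at least 2 ms, so asking for
20 ms just makes the real lower bound explicit. Where a short wait actually
matters, the documented alternative is the hrtimer-based helper:

	usleep_range(2000, 3000);       /* honors a genuine ~2 ms wait */
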
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 2f2501890c4e..4544d6f88ad7 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1017,7 +1017,7 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
1017 /* Post a send completion queue entry if requested. */ 1017 /* Post a send completion queue entry if requested. */
1018 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || 1018 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
1019 (wqe->wr.send_flags & IB_SEND_SIGNALED)) { 1019 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
1020 memset(&wc, 0, sizeof wc); 1020 memset(&wc, 0, sizeof(wc));
1021 wc.wr_id = wqe->wr.wr_id; 1021 wc.wr_id = wqe->wr.wr_id;
1022 wc.status = IB_WC_SUCCESS; 1022 wc.status = IB_WC_SUCCESS;
1023 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; 1023 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
@@ -1073,7 +1073,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
1073 /* Post a send completion queue entry if requested. */ 1073 /* Post a send completion queue entry if requested. */
1074 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || 1074 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
1075 (wqe->wr.send_flags & IB_SEND_SIGNALED)) { 1075 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
1076 memset(&wc, 0, sizeof wc); 1076 memset(&wc, 0, sizeof(wc));
1077 wc.wr_id = wqe->wr.wr_id; 1077 wc.wr_id = wqe->wr.wr_id;
1078 wc.status = IB_WC_SUCCESS; 1078 wc.status = IB_WC_SUCCESS;
1079 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; 1079 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 4c07a8b34ffe..f42bd0f47577 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -247,8 +247,8 @@ static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
247 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 247 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
248 248
249 return ppd->guid; 249 return ppd->guid;
250 } else 250 }
251 return ibp->guids[index - 1]; 251 return ibp->guids[index - 1];
252} 252}
253 253
254static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id) 254static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
@@ -420,7 +420,7 @@ again:
420 goto serr; 420 goto serr;
421 } 421 }
422 422
423 memset(&wc, 0, sizeof wc); 423 memset(&wc, 0, sizeof(wc));
424 send_status = IB_WC_SUCCESS; 424 send_status = IB_WC_SUCCESS;
425 425
426 release = 1; 426 release = 1;
@@ -792,7 +792,7 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
792 status != IB_WC_SUCCESS) { 792 status != IB_WC_SUCCESS) {
793 struct ib_wc wc; 793 struct ib_wc wc;
794 794
795 memset(&wc, 0, sizeof wc); 795 memset(&wc, 0, sizeof(wc));
796 wc.wr_id = wqe->wr.wr_id; 796 wc.wr_id = wqe->wr.wr_id;
797 wc.status = status; 797 wc.status = status;
798 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; 798 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 911205d3d5a0..c72775f27212 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -259,6 +259,7 @@ static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
259 * it again during startup. 259 * it again during startup.
260 */ 260 */
261 u64 val; 261 u64 val;
262
262 rst_val &= ~(1ULL); 263 rst_val &= ~(1ULL);
263 qib_write_kreg(dd, kr_hwerrmask, 264 qib_write_kreg(dd, kr_hwerrmask,
264 dd->cspec->hwerrmask & 265 dd->cspec->hwerrmask &
@@ -590,6 +591,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
590 * Both should be clear 591 * Both should be clear
591 */ 592 */
592 u64 newval = 0; 593 u64 newval = 0;
594
593 qib_write_kreg(dd, acc, newval); 595 qib_write_kreg(dd, acc, newval);
594 /* First read after write is not trustworthy */ 596 /* First read after write is not trustworthy */
595 pollval = qib_read_kreg32(dd, acc); 597 pollval = qib_read_kreg32(dd, acc);
@@ -601,6 +603,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
601 /* Need to claim */ 603 /* Need to claim */
602 u64 pollval; 604 u64 pollval;
603 u64 newval = EPB_ACC_REQ | oct_sel; 605 u64 newval = EPB_ACC_REQ | oct_sel;
606
604 qib_write_kreg(dd, acc, newval); 607 qib_write_kreg(dd, acc, newval);
605 /* First read after write is not trustworthy */ 608 /* First read after write is not trustworthy */
606 pollval = qib_read_kreg32(dd, acc); 609 pollval = qib_read_kreg32(dd, acc);
@@ -812,6 +815,7 @@ static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
812 if (!sofar) { 815 if (!sofar) {
813 /* Only set address at start of chunk */ 816 /* Only set address at start of chunk */
814 int addrbyte = (addr + sofar) >> 8; 817 int addrbyte = (addr + sofar) >> 8;
818
815 transval = csbit | EPB_MADDRH | addrbyte; 819 transval = csbit | EPB_MADDRH | addrbyte;
816 tries = epb_trans(dd, trans, transval, 820 tries = epb_trans(dd, trans, transval,
817 &transval); 821 &transval);
@@ -922,7 +926,7 @@ qib_sd7220_ib_vfy(struct qib_devdata *dd, const struct firmware *fw)
922 * IRQ not set up at this point in init, so we poll. 926 * IRQ not set up at this point in init, so we poll.
923 */ 927 */
924#define IB_SERDES_TRIM_DONE (1ULL << 11) 928#define IB_SERDES_TRIM_DONE (1ULL << 11)
925#define TRIM_TMO (30) 929#define TRIM_TMO (15)
926 930
927static int qib_sd_trimdone_poll(struct qib_devdata *dd) 931static int qib_sd_trimdone_poll(struct qib_devdata *dd)
928{ 932{
@@ -940,7 +944,7 @@ static int qib_sd_trimdone_poll(struct qib_devdata *dd)
940 ret = 1; 944 ret = 1;
941 break; 945 break;
942 } 946 }
943 msleep(10); 947 msleep(20);
944 } 948 }
945 if (trim_tmo >= TRIM_TMO) { 949 if (trim_tmo >= TRIM_TMO) {
946 qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo); 950 qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
@@ -1071,6 +1075,7 @@ static int qib_sd_setvals(struct qib_devdata *dd)
1071 dds_reg_map >>= 4; 1075 dds_reg_map >>= 4;
1072 for (midx = 0; midx < DDS_ROWS; ++midx) { 1076 for (midx = 0; midx < DDS_ROWS; ++midx) {
1073 u64 __iomem *daddr = taddr + ((midx << 4) + idx); 1077 u64 __iomem *daddr = taddr + ((midx << 4) + idx);
1078
1074 data = dds_init_vals[midx].reg_vals[idx]; 1079 data = dds_init_vals[midx].reg_vals[idx];
1075 writeq(data, daddr); 1080 writeq(data, daddr);
1076 mmiowb(); 1081 mmiowb();
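
The TRIM_TMO change pairs with the msleep() change inside
qib_sd_trimdone_poll(); the total TRIMDONE budget is unchanged, only the
per-iteration sleep moves above msleep()'s granularity floor:

	old: 30 iterations * msleep(10) = 300 ms total
	new: 15 iterations * msleep(20) = 300 ms total
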
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 3c8e4e3caca6..81f56cdff2bc 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -586,8 +586,8 @@ static ssize_t show_serial(struct device *device,
586 container_of(device, struct qib_ibdev, ibdev.dev); 586 container_of(device, struct qib_ibdev, ibdev.dev);
587 struct qib_devdata *dd = dd_from_dev(dev); 587 struct qib_devdata *dd = dd_from_dev(dev);
588 588
589 buf[sizeof dd->serial] = '\0'; 589 buf[sizeof(dd->serial)] = '\0';
590 memcpy(buf, dd->serial, sizeof dd->serial); 590 memcpy(buf, dd->serial, sizeof(dd->serial));
591 strcat(buf, "\n"); 591 strcat(buf, "\n");
592 return strlen(buf); 592 return strlen(buf);
593} 593}
@@ -611,28 +611,6 @@ bail:
611 return ret < 0 ? ret : count; 611 return ret < 0 ? ret : count;
612} 612}
613 613
614static ssize_t show_logged_errs(struct device *device,
615 struct device_attribute *attr, char *buf)
616{
617 struct qib_ibdev *dev =
618 container_of(device, struct qib_ibdev, ibdev.dev);
619 struct qib_devdata *dd = dd_from_dev(dev);
620 int idx, count;
621
622 /* force consistency with actual EEPROM */
623 if (qib_update_eeprom_log(dd) != 0)
624 return -ENXIO;
625
626 count = 0;
627 for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
628 count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
629 dd->eep_st_errs[idx],
630 idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
631 }
632
633 return count;
634}
635
636/* 614/*
637 * Dump tempsense regs. in decimal, to ease shell-scripts. 615 * Dump tempsense regs. in decimal, to ease shell-scripts.
638 */ 616 */
@@ -679,7 +657,6 @@ static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
679static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL); 657static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
680static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL); 658static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
681static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); 659static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
682static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
683static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL); 660static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
684static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL); 661static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
685static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset); 662static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
@@ -693,7 +670,6 @@ static struct device_attribute *qib_attributes[] = {
693 &dev_attr_nfreectxts, 670 &dev_attr_nfreectxts,
694 &dev_attr_serial, 671 &dev_attr_serial,
695 &dev_attr_boardversion, 672 &dev_attr_boardversion,
696 &dev_attr_logged_errors,
697 &dev_attr_tempsense, 673 &dev_attr_tempsense,
698 &dev_attr_localbus_info, 674 &dev_attr_localbus_info,
699 &dev_attr_chip_reset, 675 &dev_attr_chip_reset,
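
The show_serial() pattern above deals with a fixed-size, possibly
unterminated byte array: the terminator is written one slot past the copied
region, which memcpy() never touches. A sketch with hypothetical sizes:

	char serial[16];                /* EEPROM bytes, maybe no NUL */
	char buf[64];

	buf[sizeof(serial)] = '\0';     /* slot 16, outside the copy */
	memcpy(buf, serial, sizeof(serial));
	strcat(buf, "\n");              /* safe: buf is now terminated */
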
diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c
index 647f7beb1b0a..f5698664419b 100644
--- a/drivers/infiniband/hw/qib/qib_twsi.c
+++ b/drivers/infiniband/hw/qib/qib_twsi.c
@@ -105,6 +105,7 @@ static void scl_out(struct qib_devdata *dd, u8 bit)
105 udelay(2); 105 udelay(2);
106 else { 106 else {
107 int rise_usec; 107 int rise_usec;
108
108 for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) { 109 for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
109 if (mask & dd->f_gpio_mod(dd, 0, 0, 0)) 110 if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
110 break; 111 break;
@@ -326,6 +327,7 @@ int qib_twsi_reset(struct qib_devdata *dd)
326static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags) 327static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
327{ 328{
328 int ret = 1; 329 int ret = 1;
330
329 if (flags & QIB_TWSI_START) 331 if (flags & QIB_TWSI_START)
330 start_seq(dd); 332 start_seq(dd);
331 333
@@ -435,8 +437,7 @@ int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
435 int sub_len; 437 int sub_len;
436 const u8 *bp = buffer; 438 const u8 *bp = buffer;
437 int max_wait_time, i; 439 int max_wait_time, i;
438 int ret; 440 int ret = 1;
439 ret = 1;
440 441
441 while (len > 0) { 442 while (len > 0) {
442 if (dev == QIB_TWSI_NO_DEV) { 443 if (dev == QIB_TWSI_NO_DEV) {
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
index 31d3561400a4..eface3b3dacf 100644
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -180,6 +180,7 @@ void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
180 180
181 for (i = 0; i < cnt; i++) { 181 for (i = 0; i < cnt; i++) {
182 int which; 182 int which;
183
183 if (!test_bit(i, mask)) 184 if (!test_bit(i, mask))
184 continue; 185 continue;
185 /* 186 /*
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index aaf7039f8ed2..26243b722b5e 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -127,7 +127,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
127 * present on the wire. 127 * present on the wire.
128 */ 128 */
129 length = swqe->length; 129 length = swqe->length;
130 memset(&wc, 0, sizeof wc); 130 memset(&wc, 0, sizeof(wc));
131 wc.byte_len = length + sizeof(struct ib_grh); 131 wc.byte_len = length + sizeof(struct ib_grh);
132 132
133 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) { 133 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index d2806cae234c..3e0677c51276 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -50,7 +50,7 @@
50/* expected size of headers (for dma_pool) */ 50/* expected size of headers (for dma_pool) */
51#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64 51#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
52/* attempt to drain the queue for 5secs */ 52/* attempt to drain the queue for 5secs */
53#define QIB_USER_SDMA_DRAIN_TIMEOUT 500 53#define QIB_USER_SDMA_DRAIN_TIMEOUT 250
54 54
55/* 55/*
56 * track how many times a process open this driver. 56 * track how many times a process open this driver.
@@ -226,6 +226,7 @@ qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
226 sdma_rb_node->refcount++; 226 sdma_rb_node->refcount++;
227 } else { 227 } else {
228 int ret; 228 int ret;
229
229 sdma_rb_node = kmalloc(sizeof( 230 sdma_rb_node = kmalloc(sizeof(
230 struct qib_user_sdma_rb_node), GFP_KERNEL); 231 struct qib_user_sdma_rb_node), GFP_KERNEL);
231 if (!sdma_rb_node) 232 if (!sdma_rb_node)
@@ -936,6 +937,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
936 937
937 if (tiddma) { 938 if (tiddma) {
938 char *tidsm = (char *)pkt + pktsize; 939 char *tidsm = (char *)pkt + pktsize;
940
939 cfur = copy_from_user(tidsm, 941 cfur = copy_from_user(tidsm,
940 iov[idx].iov_base, tidsmsize); 942 iov[idx].iov_base, tidsmsize);
941 if (cfur) { 943 if (cfur) {
@@ -1142,7 +1144,7 @@ void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
1142 qib_user_sdma_hwqueue_clean(ppd); 1144 qib_user_sdma_hwqueue_clean(ppd);
1143 qib_user_sdma_queue_clean(ppd, pq); 1145 qib_user_sdma_queue_clean(ppd, pq);
1144 mutex_unlock(&pq->lock); 1146 mutex_unlock(&pq->lock);
1145 msleep(10); 1147 msleep(20);
1146 } 1148 }
1147 1149
1148 if (pq->num_pending || pq->num_sending) { 1150 if (pq->num_pending || pq->num_sending) {
@@ -1316,8 +1318,6 @@ retry:
1316 1318
1317 if (nfree && !list_empty(pktlist)) 1319 if (nfree && !list_empty(pktlist))
1318 goto retry; 1320 goto retry;
1319
1320 return;
1321} 1321}
1322 1322
1323/* pq->lock must be held, get packets on the wire... */ 1323/* pq->lock must be held, get packets on the wire... */
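
QIB_USER_SDMA_DRAIN_TIMEOUT is a loop count, not a duration; halving it
while doubling the msleep() in qib_user_sdma_queue_drain() preserves the
advertised 5-second drain window:

	old: 500 iterations * msleep(10) = 5000 ms
	new: 250 iterations * msleep(20) = 5000 ms
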
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 9bcfbd842980..4a3599890ea5 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1342,6 +1342,7 @@ static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
1342done: 1342done:
1343 if (dd->flags & QIB_USE_SPCL_TRIG) { 1343 if (dd->flags & QIB_USE_SPCL_TRIG) {
1344 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023; 1344 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
1345
1345 qib_flush_wc(); 1346 qib_flush_wc();
1346 __raw_writel(0xaebecede, piobuf_orig + spcl_off); 1347 __raw_writel(0xaebecede, piobuf_orig + spcl_off);
1347 } 1348 }
@@ -1744,7 +1745,7 @@ static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
1744 * we allow allocations of more than we report for this value. 1745 * we allow allocations of more than we report for this value.
1745 */ 1746 */
1746 1747
1747 pd = kmalloc(sizeof *pd, GFP_KERNEL); 1748 pd = kmalloc(sizeof(*pd), GFP_KERNEL);
1748 if (!pd) { 1749 if (!pd) {
1749 ret = ERR_PTR(-ENOMEM); 1750 ret = ERR_PTR(-ENOMEM);
1750 goto bail; 1751 goto bail;
@@ -1829,7 +1830,7 @@ static struct ib_ah *qib_create_ah(struct ib_pd *pd,
1829 goto bail; 1830 goto bail;
1830 } 1831 }
1831 1832
1832 ah = kmalloc(sizeof *ah, GFP_ATOMIC); 1833 ah = kmalloc(sizeof(*ah), GFP_ATOMIC);
1833 if (!ah) { 1834 if (!ah) {
1834 ret = ERR_PTR(-ENOMEM); 1835 ret = ERR_PTR(-ENOMEM);
1835 goto bail; 1836 goto bail;
@@ -1862,7 +1863,7 @@ struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
1862 struct ib_ah *ah = ERR_PTR(-EINVAL); 1863 struct ib_ah *ah = ERR_PTR(-EINVAL);
1863 struct qib_qp *qp0; 1864 struct qib_qp *qp0;
1864 1865
1865 memset(&attr, 0, sizeof attr); 1866 memset(&attr, 0, sizeof(attr));
1866 attr.dlid = dlid; 1867 attr.dlid = dlid;
1867 attr.port_num = ppd_from_ibp(ibp)->port; 1868 attr.port_num = ppd_from_ibp(ibp)->port;
1868 rcu_read_lock(); 1869 rcu_read_lock();
@@ -1977,7 +1978,7 @@ static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
1977 struct qib_ucontext *context; 1978 struct qib_ucontext *context;
1978 struct ib_ucontext *ret; 1979 struct ib_ucontext *ret;
1979 1980
1980 context = kmalloc(sizeof *context, GFP_KERNEL); 1981 context = kmalloc(sizeof(*context), GFP_KERNEL);
1981 if (!context) { 1982 if (!context) {
1982 ret = ERR_PTR(-ENOMEM); 1983 ret = ERR_PTR(-ENOMEM);
1983 goto bail; 1984 goto bail;
@@ -2054,7 +2055,9 @@ int qib_register_ib_device(struct qib_devdata *dd)
2054 2055
2055 dev->qp_table_size = ib_qib_qp_table_size; 2056 dev->qp_table_size = ib_qib_qp_table_size;
2056 get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd)); 2057 get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
2057 dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table, 2058 dev->qp_table = kmalloc_array(
2059 dev->qp_table_size,
2060 sizeof(*dev->qp_table),
2058 GFP_KERNEL); 2061 GFP_KERNEL);
2059 if (!dev->qp_table) { 2062 if (!dev->qp_table) {
2060 ret = -ENOMEM; 2063 ret = -ENOMEM;
@@ -2122,7 +2125,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
2122 for (i = 0; i < ppd->sdma_descq_cnt; i++) { 2125 for (i = 0; i < ppd->sdma_descq_cnt; i++) {
2123 struct qib_verbs_txreq *tx; 2126 struct qib_verbs_txreq *tx;
2124 2127
2125 tx = kzalloc(sizeof *tx, GFP_KERNEL); 2128 tx = kzalloc(sizeof(*tx), GFP_KERNEL);
2126 if (!tx) { 2129 if (!tx) {
2127 ret = -ENOMEM; 2130 ret = -ENOMEM;
2128 goto err_tx; 2131 goto err_tx;
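
kmalloc_array(n, size, flags) is the non-zeroing sibling of kcalloc(): it
performs the same overflow check on n * size but skips the memset. Here
qp_table_size is driver-controlled, so the overflow is theoretical, but the
helper documents intent. The shape of the call (unwind label hypothetical):

	dev->qp_table = kmalloc_array(dev->qp_table_size,
				      sizeof(*dev->qp_table), GFP_KERNEL);
	if (!dev->qp_table) {
		ret = -ENOMEM;
		goto err_tab;
	}
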
diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
index dabb697b1c2a..f8ea069a3eaf 100644
--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
@@ -43,7 +43,7 @@ static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
43{ 43{
44 struct qib_mcast_qp *mqp; 44 struct qib_mcast_qp *mqp;
45 45
46 mqp = kmalloc(sizeof *mqp, GFP_KERNEL); 46 mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
47 if (!mqp) 47 if (!mqp)
48 goto bail; 48 goto bail;
49 49
@@ -75,7 +75,7 @@ static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
75{ 75{
76 struct qib_mcast *mcast; 76 struct qib_mcast *mcast;
77 77
78 mcast = kmalloc(sizeof *mcast, GFP_KERNEL); 78 mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);
79 if (!mcast) 79 if (!mcast)
80 goto bail; 80 goto bail;
81 81
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
index 1d7281c5a02e..81b225f2300a 100644
--- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c
+++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
@@ -72,6 +72,7 @@ int qib_enable_wc(struct qib_devdata *dd)
72 if (dd->piobcnt2k && dd->piobcnt4k) { 72 if (dd->piobcnt2k && dd->piobcnt4k) {
73 /* 2 sizes for chip */ 73 /* 2 sizes for chip */
74 unsigned long pio2kbase, pio4kbase; 74 unsigned long pio2kbase, pio4kbase;
75
75 pio2kbase = dd->piobufbase & 0xffffffffUL; 76 pio2kbase = dd->piobufbase & 0xffffffffUL;
76 pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL; 77 pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
77 if (pio2kbase < pio4kbase) { 78 if (pio2kbase < pio4kbase) {
@@ -91,7 +92,7 @@ int qib_enable_wc(struct qib_devdata *dd)
91 } 92 }
92 93
93 for (bits = 0; !(piolen & (1ULL << bits)); bits++) 94 for (bits = 0; !(piolen & (1ULL << bits)); bits++)
94 /* do nothing */ ; 95 ; /* do nothing */
95 96
96 if (piolen != (1ULL << bits)) { 97 if (piolen != (1ULL << bits)) {
97 piolen >>= bits; 98 piolen >>= bits;
@@ -100,8 +101,8 @@ int qib_enable_wc(struct qib_devdata *dd)
100 piolen = 1ULL << (bits + 1); 101 piolen = 1ULL << (bits + 1);
101 } 102 }
102 if (pioaddr & (piolen - 1)) { 103 if (pioaddr & (piolen - 1)) {
103 u64 atmp; 104 u64 atmp = pioaddr & ~(piolen - 1);
104 atmp = pioaddr & ~(piolen - 1); 105
105 if (atmp < addr || (atmp + piolen) > (addr + len)) { 106 if (atmp < addr || (atmp + piolen) > (addr + len)) {
106 qib_dev_err(dd, 107 qib_dev_err(dd,
107 "No way to align address/size (%llx/%llx), no WC mtrr\n", 108 "No way to align address/size (%llx/%llx), no WC mtrr\n",
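
The relocated comment marks an open-coded lowest-set-bit scan: a
write-combining MTRR range must be a power of two in size and aligned on
that size, so the code peels trailing zero bits off piolen. __ffs() from
<linux/bitops.h> computes the same index; a sketch assuming piolen != 0:

	unsigned long len = 0x24000;            /* hypothetical PIO length */
	unsigned long bits = __ffs(len);        /* 14, the lowest set bit */
	bool pow2 = (len == (1UL << bits));     /* power of two iff equal */
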
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 5ce26817e7e1..b47aea1094b2 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -654,7 +654,9 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
654 enum dma_data_direction dma_dir); 654 enum dma_data_direction dma_dir);
655 655
656void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, 656void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
657 struct iser_data_buf *data); 657 struct iser_data_buf *data,
658 enum dma_data_direction dir);
659
658int iser_initialize_task_headers(struct iscsi_task *task, 660int iser_initialize_task_headers(struct iscsi_task *task,
659 struct iser_tx_desc *tx_desc); 661 struct iser_tx_desc *tx_desc);
660int iser_alloc_rx_descriptors(struct iser_conn *iser_conn, 662int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 3821633f1065..20e859a6f1a6 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -320,9 +320,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
320 struct ib_conn *ib_conn = &iser_conn->ib_conn; 320 struct ib_conn *ib_conn = &iser_conn->ib_conn;
321 struct iser_device *device = ib_conn->device; 321 struct iser_device *device = ib_conn->device;
322 322
323 if (!iser_conn->rx_descs)
324 goto free_login_buf;
325
326 if (device->iser_free_rdma_reg_res) 323 if (device->iser_free_rdma_reg_res)
327 device->iser_free_rdma_reg_res(ib_conn); 324 device->iser_free_rdma_reg_res(ib_conn);
328 325
@@ -334,7 +331,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
334 /* make sure we never redo any unmapping */ 331 /* make sure we never redo any unmapping */
335 iser_conn->rx_descs = NULL; 332 iser_conn->rx_descs = NULL;
336 333
337free_login_buf:
338 iser_free_login_buf(iser_conn); 334 iser_free_login_buf(iser_conn);
339} 335}
340 336
@@ -714,19 +710,23 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
714 device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN); 710 device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
715 if (is_rdma_data_aligned) 711 if (is_rdma_data_aligned)
716 iser_dma_unmap_task_data(iser_task, 712 iser_dma_unmap_task_data(iser_task,
717 &iser_task->data[ISER_DIR_IN]); 713 &iser_task->data[ISER_DIR_IN],
714 DMA_FROM_DEVICE);
718 if (prot_count && is_rdma_prot_aligned) 715 if (prot_count && is_rdma_prot_aligned)
719 iser_dma_unmap_task_data(iser_task, 716 iser_dma_unmap_task_data(iser_task,
720 &iser_task->prot[ISER_DIR_IN]); 717 &iser_task->prot[ISER_DIR_IN],
718 DMA_FROM_DEVICE);
721 } 719 }
722 720
723 if (iser_task->dir[ISER_DIR_OUT]) { 721 if (iser_task->dir[ISER_DIR_OUT]) {
724 device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT); 722 device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
725 if (is_rdma_data_aligned) 723 if (is_rdma_data_aligned)
726 iser_dma_unmap_task_data(iser_task, 724 iser_dma_unmap_task_data(iser_task,
727 &iser_task->data[ISER_DIR_OUT]); 725 &iser_task->data[ISER_DIR_OUT],
726 DMA_TO_DEVICE);
728 if (prot_count && is_rdma_prot_aligned) 727 if (prot_count && is_rdma_prot_aligned)
729 iser_dma_unmap_task_data(iser_task, 728 iser_dma_unmap_task_data(iser_task,
730 &iser_task->prot[ISER_DIR_OUT]); 729 &iser_task->prot[ISER_DIR_OUT],
730 DMA_TO_DEVICE);
731 } 731 }
732} 732}
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index abce9339333f..341040bf0984 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -332,12 +332,13 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
332} 332}
333 333
334void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, 334void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
335 struct iser_data_buf *data) 335 struct iser_data_buf *data,
336 enum dma_data_direction dir)
336{ 337{
337 struct ib_device *dev; 338 struct ib_device *dev;
338 339
339 dev = iser_task->iser_conn->ib_conn.device->ib_device; 340 dev = iser_task->iser_conn->ib_conn.device->ib_device;
340 ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE); 341 ib_dma_unmap_sg(dev, data->buf, data->size, dir);
341} 342}
342 343
343static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, 344static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
@@ -357,7 +358,9 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
357 iser_data_buf_dump(mem, ibdev); 358 iser_data_buf_dump(mem, ibdev);
358 359
359 /* unmap the command data before accessing it */ 360 /* unmap the command data before accessing it */
360 iser_dma_unmap_task_data(iser_task, mem); 361 iser_dma_unmap_task_data(iser_task, mem,
362 (cmd_dir == ISER_DIR_OUT) ?
363 DMA_TO_DEVICE : DMA_FROM_DEVICE);
361 364
362 /* allocate copy buf, if we are writing, copy the */ 365 /* allocate copy buf, if we are writing, copy the */
363 /* unaligned scatterlist, dma map the copy */ 366 /* unaligned scatterlist, dma map the copy */
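
Threading the direction down into iser_dma_unmap_task_data() fixes a real
bug: the old code always unmapped with DMA_FROM_DEVICE, even for writes.
On non-coherent architectures the unmap direction drives cache maintenance,
so it must match the direction used at map time. Sketch for the write path:

	ib_dma_map_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
	/* ... HCA reads the payload ... */
	ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
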
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 695a2704bd43..4065abe28829 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -600,16 +600,16 @@ void iser_release_work(struct work_struct *work)
600/** 600/**
601 * iser_free_ib_conn_res - release IB related resources 601 * iser_free_ib_conn_res - release IB related resources
602 * @iser_conn: iser connection struct 602 * @iser_conn: iser connection struct
603 * @destroy_device: indicator if we need to try to release 603 * @destroy: indicator if we need to try to release the
604 * the iser device (only iscsi shutdown and DEVICE_REMOVAL 604 * iser device and memory regions pool (only iscsi
605 * will use this. 605 * shutdown and DEVICE_REMOVAL will use this).
606 * 606 *
607 * This routine is called with the iser state mutex held 607 * This routine is called with the iser state mutex held
608 * so the cm_id removal is out of here. It is safe to 608 * so the cm_id removal is out of here. It is safe to
609 * be invoked multiple times. 609 * be invoked multiple times.
610 */ 610 */
611static void iser_free_ib_conn_res(struct iser_conn *iser_conn, 611static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
612 bool destroy_device) 612 bool destroy)
613{ 613{
614 struct ib_conn *ib_conn = &iser_conn->ib_conn; 614 struct ib_conn *ib_conn = &iser_conn->ib_conn;
615 struct iser_device *device = ib_conn->device; 615 struct iser_device *device = ib_conn->device;
@@ -617,17 +617,20 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
617 iser_info("freeing conn %p cma_id %p qp %p\n", 617 iser_info("freeing conn %p cma_id %p qp %p\n",
618 iser_conn, ib_conn->cma_id, ib_conn->qp); 618 iser_conn, ib_conn->cma_id, ib_conn->qp);
619 619
620 iser_free_rx_descriptors(iser_conn);
621
622 if (ib_conn->qp != NULL) { 620 if (ib_conn->qp != NULL) {
623 ib_conn->comp->active_qps--; 621 ib_conn->comp->active_qps--;
624 rdma_destroy_qp(ib_conn->cma_id); 622 rdma_destroy_qp(ib_conn->cma_id);
625 ib_conn->qp = NULL; 623 ib_conn->qp = NULL;
626 } 624 }
627 625
628 if (destroy_device && device != NULL) { 626 if (destroy) {
629 iser_device_try_release(device); 627 if (iser_conn->rx_descs)
630 ib_conn->device = NULL; 628 iser_free_rx_descriptors(iser_conn);
629
630 if (device != NULL) {
631 iser_device_try_release(device);
632 ib_conn->device = NULL;
633 }
631 } 634 }
632} 635}
633 636
@@ -643,9 +646,11 @@ void iser_conn_release(struct iser_conn *iser_conn)
643 mutex_unlock(&ig.connlist_mutex); 646 mutex_unlock(&ig.connlist_mutex);
644 647
645 mutex_lock(&iser_conn->state_mutex); 648 mutex_lock(&iser_conn->state_mutex);
649 /* In case we end up here without ep_disconnect being invoked. */
646 if (iser_conn->state != ISER_CONN_DOWN) { 650 if (iser_conn->state != ISER_CONN_DOWN) {
647 iser_warn("iser conn %p state %d, expected state down.\n", 651 iser_warn("iser conn %p state %d, expected state down.\n",
648 iser_conn, iser_conn->state); 652 iser_conn, iser_conn->state);
653 iscsi_destroy_endpoint(iser_conn->ep);
649 iser_conn->state = ISER_CONN_DOWN; 654 iser_conn->state = ISER_CONN_DOWN;
650 } 655 }
651 /* 656 /*
@@ -840,7 +845,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
840} 845}
841 846
842static void iser_cleanup_handler(struct rdma_cm_id *cma_id, 847static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
843 bool destroy_device) 848 bool destroy)
844{ 849{
845 struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; 850 struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
846 851
@@ -850,7 +855,7 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
850 * and flush errors. 855 * and flush errors.
851 */ 856 */
852 iser_disconnected_handler(cma_id); 857 iser_disconnected_handler(cma_id);
853 iser_free_ib_conn_res(iser_conn, destroy_device); 858 iser_free_ib_conn_res(iser_conn, destroy);
854 complete(&iser_conn->ib_completion); 859 complete(&iser_conn->ib_completion);
855}; 860};
856 861
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index dafb3c531f96..075b19cc78e8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -38,7 +38,7 @@
38#define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \ 38#define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
39 ISERT_MAX_CONN) 39 ISERT_MAX_CONN)
40 40
41int isert_debug_level = 0; 41static int isert_debug_level;
42module_param_named(debug_level, isert_debug_level, int, 0644); 42module_param_named(debug_level, isert_debug_level, int, 0644);
43MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)"); 43MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
44 44
@@ -949,7 +949,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
949 isert_err("ib_post_recv() failed with ret: %d\n", ret); 949 isert_err("ib_post_recv() failed with ret: %d\n", ret);
950 isert_conn->post_recv_buf_count -= count; 950 isert_conn->post_recv_buf_count -= count;
951 } else { 951 } else {
952 isert_dbg("isert_post_recv(): Posted %d RX buffers\n", count); 952 isert_dbg("Posted %d RX buffers\n", count);
953 isert_conn->conn_rx_desc_head = rx_head; 953 isert_conn->conn_rx_desc_head = rx_head;
954 } 954 }
955 return ret; 955 return ret;
@@ -1351,17 +1351,19 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
1351 struct iscsi_conn *conn = isert_conn->conn; 1351 struct iscsi_conn *conn = isert_conn->conn;
1352 u32 payload_length = ntoh24(hdr->dlength); 1352 u32 payload_length = ntoh24(hdr->dlength);
1353 int rc; 1353 int rc;
1354 unsigned char *text_in; 1354 unsigned char *text_in = NULL;
1355 1355
1356 rc = iscsit_setup_text_cmd(conn, cmd, hdr); 1356 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
1357 if (rc < 0) 1357 if (rc < 0)
1358 return rc; 1358 return rc;
1359 1359
1360 text_in = kzalloc(payload_length, GFP_KERNEL); 1360 if (payload_length) {
1361 if (!text_in) { 1361 text_in = kzalloc(payload_length, GFP_KERNEL);
1362 isert_err("Unable to allocate text_in of payload_length: %u\n", 1362 if (!text_in) {
1363 payload_length); 1363 isert_err("Unable to allocate text_in of payload_length: %u\n",
1364 return -ENOMEM; 1364 payload_length);
1365 return -ENOMEM;
1366 }
1365 } 1367 }
1366 cmd->text_in_ptr = text_in; 1368 cmd->text_in_ptr = text_in;
1367 1369
@@ -1434,9 +1436,15 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1434 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); 1436 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
1435 break; 1437 break;
1436 case ISCSI_OP_TEXT: 1438 case ISCSI_OP_TEXT:
1437 cmd = isert_allocate_cmd(conn); 1439 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) {
1438 if (!cmd) 1440 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
1439 break; 1441 if (!cmd)
1442 break;
1443 } else {
1444 cmd = isert_allocate_cmd(conn);
1445 if (!cmd)
1446 break;
1447 }
1440 1448
1441 isert_cmd = iscsit_priv_cmd(cmd); 1449 isert_cmd = iscsit_priv_cmd(cmd);
1442 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd, 1450 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
@@ -1658,6 +1666,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
1658 struct isert_conn *isert_conn = isert_cmd->conn; 1666 struct isert_conn *isert_conn = isert_cmd->conn;
1659 struct iscsi_conn *conn = isert_conn->conn; 1667 struct iscsi_conn *conn = isert_conn->conn;
1660 struct isert_device *device = isert_conn->conn_device; 1668 struct isert_device *device = isert_conn->conn_device;
1669 struct iscsi_text_rsp *hdr;
1661 1670
1662 isert_dbg("Cmd %p\n", isert_cmd); 1671 isert_dbg("Cmd %p\n", isert_cmd);
1663 1672
@@ -1698,6 +1707,11 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
1698 case ISCSI_OP_REJECT: 1707 case ISCSI_OP_REJECT:
1699 case ISCSI_OP_NOOP_OUT: 1708 case ISCSI_OP_NOOP_OUT:
1700 case ISCSI_OP_TEXT: 1709 case ISCSI_OP_TEXT:
1710 hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
1711 /* If the continue bit is on, keep the command alive */
1712 if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
1713 break;
1714
1701 spin_lock_bh(&conn->cmd_lock); 1715 spin_lock_bh(&conn->cmd_lock);
1702 if (!list_empty(&cmd->i_conn_node)) 1716 if (!list_empty(&cmd->i_conn_node))
1703 list_del_init(&cmd->i_conn_node); 1717 list_del_init(&cmd->i_conn_node);
@@ -1709,8 +1723,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
1709 * associated cmd->se_cmd needs to be released. 1723 * associated cmd->se_cmd needs to be released.
1710 */ 1724 */
1711 if (cmd->se_cmd.se_tfo != NULL) { 1725 if (cmd->se_cmd.se_tfo != NULL) {
1712 isert_dbg("Calling transport_generic_free_cmd from" 1726 isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
1713 " isert_put_cmd for 0x%02x\n",
1714 cmd->iscsi_opcode); 1727 cmd->iscsi_opcode);
1715 transport_generic_free_cmd(&cmd->se_cmd, 0); 1728 transport_generic_free_cmd(&cmd->se_cmd, 0);
1716 break; 1729 break;
@@ -2275,7 +2288,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2275 } 2288 }
2276 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 2289 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2277 2290
2278 isert_dbg("conn %p Text Reject\n", isert_conn); 2291 isert_dbg("conn %p Text Response\n", isert_conn);
2279 2292
2280 return isert_post_response(isert_conn, isert_cmd); 2293 return isert_post_response(isert_conn, isert_cmd);
2281} 2294}
@@ -3136,7 +3149,7 @@ accept_wait:
3136 spin_lock_bh(&np->np_thread_lock); 3149 spin_lock_bh(&np->np_thread_lock);
3137 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { 3150 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
3138 spin_unlock_bh(&np->np_thread_lock); 3151 spin_unlock_bh(&np->np_thread_lock);
3139 isert_dbg("np_thread_state %d for isert_accept_np\n", 3152 isert_dbg("np_thread_state %d\n",
3140 np->np_thread_state); 3153 np->np_thread_state);
3141 /** 3154 /**
3142 * No point in stalling here when np_thread 3155 * No point in stalling here when np_thread
@@ -3320,7 +3333,8 @@ static int __init isert_init(void)
3320{ 3333{
3321 int ret; 3334 int ret;
3322 3335
3323 isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0); 3336 isert_comp_wq = alloc_workqueue("isert_comp_wq",
3337 WQ_UNBOUND | WQ_HIGHPRI, 0);
3324 if (!isert_comp_wq) { 3338 if (!isert_comp_wq) {
3325 isert_err("Unable to allocate isert_comp_wq\n"); 3339 isert_err("Unable to allocate isert_comp_wq\n");
3326 ret = -ENOMEM; 3340 ret = -ENOMEM;
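
The new workqueue flags target completion latency: WQ_UNBOUND lets the work
run on any CPU instead of the queueing CPU, and WQ_HIGHPRI serves it from
the high-priority worker pool; a max_active of 0 keeps the default limit.
The general shape:

	wq = alloc_workqueue("demo_comp_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!wq)
		return -ENOMEM;
	queue_work(wq, &some_work);     /* any CPU, high priority */
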
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index eb694ddad79f..6e0a477681e9 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3518,7 +3518,7 @@ static void srpt_close_session(struct se_session *se_sess)
3518 DECLARE_COMPLETION_ONSTACK(release_done); 3518 DECLARE_COMPLETION_ONSTACK(release_done);
3519 struct srpt_rdma_ch *ch; 3519 struct srpt_rdma_ch *ch;
3520 struct srpt_device *sdev; 3520 struct srpt_device *sdev;
3521 int res; 3521 unsigned long res;
3522 3522
3523 ch = se_sess->fabric_sess_ptr; 3523 ch = se_sess->fabric_sess_ptr;
3524 WARN_ON(ch->sess != se_sess); 3524 WARN_ON(ch->sess != se_sess);
@@ -3533,7 +3533,7 @@ static void srpt_close_session(struct se_session *se_sess)
3533 spin_unlock_irq(&sdev->spinlock); 3533 spin_unlock_irq(&sdev->spinlock);
3534 3534
3535 res = wait_for_completion_timeout(&release_done, 60 * HZ); 3535 res = wait_for_completion_timeout(&release_done, 60 * HZ);
3536 WARN_ON(res <= 0); 3536 WARN_ON(res == 0);
3537} 3537}
3538 3538
3539/** 3539/**
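
wait_for_completion_timeout() returns unsigned long: 0 on timeout,
otherwise the jiffies remaining. Stored in a signed int, WARN_ON(res <= 0)
was misleading, since the result is never negative; the corrected idiom is:

	unsigned long res;

	res = wait_for_completion_timeout(&release_done, 60 * HZ);
	WARN_ON(res == 0);              /* zero means we timed out */
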
diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c
index b78425765d3e..d09cefa37931 100644
--- a/drivers/input/joystick/adi.c
+++ b/drivers/input/joystick/adi.c
@@ -535,8 +535,7 @@ static int adi_connect(struct gameport *gameport, struct gameport_driver *drv)
535 } 535 }
536 } 536 }
537 fail2: for (i = 0; i < 2; i++) 537 fail2: for (i = 0; i < 2; i++)
538 if (port->adi[i].dev) 538 input_free_device(port->adi[i].dev);
539 input_free_device(port->adi[i].dev);
540 gameport_close(gameport); 539 gameport_close(gameport);
541 fail1: gameport_set_drvdata(gameport, NULL); 540 fail1: gameport_set_drvdata(gameport, NULL);
542 kfree(port); 541 kfree(port);
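
input_free_device(), like kfree(), accepts NULL as a no-op, so the guard in
the adi.c unwind loop was dead weight. In miniature:

	struct input_dev *dev = NULL;   /* never allocated on this path */

	input_free_device(dev);         /* NULL-safe, nothing happens */
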
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index a89488aa1aa4..fcef5d1365e2 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -345,13 +345,11 @@ static int pxa27x_keypad_build_keycode(struct pxa27x_keypad *keypad)
345{ 345{
346 const struct pxa27x_keypad_platform_data *pdata = keypad->pdata; 346 const struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
347 struct input_dev *input_dev = keypad->input_dev; 347 struct input_dev *input_dev = keypad->input_dev;
348 const struct matrix_keymap_data *keymap_data =
349 pdata ? pdata->matrix_keymap_data : NULL;
350 unsigned short keycode; 348 unsigned short keycode;
351 int i; 349 int i;
352 int error; 350 int error;
353 351
354 error = matrix_keypad_build_keymap(keymap_data, NULL, 352 error = matrix_keypad_build_keymap(pdata->matrix_keymap_data, NULL,
355 pdata->matrix_key_rows, 353 pdata->matrix_key_rows,
356 pdata->matrix_key_cols, 354 pdata->matrix_key_cols,
357 keypad->keycodes, input_dev); 355 keypad->keycodes, input_dev);
diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c
index 3f4351579372..a0fc18fdfc0c 100644
--- a/drivers/input/misc/bfin_rotary.c
+++ b/drivers/input/misc/bfin_rotary.c
@@ -7,29 +7,37 @@
7 7
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/interrupt.h> 9#include <linux/interrupt.h>
10#include <linux/io.h>
10#include <linux/irq.h> 11#include <linux/irq.h>
11#include <linux/pm.h> 12#include <linux/pm.h>
12#include <linux/platform_device.h> 13#include <linux/platform_device.h>
13#include <linux/input.h> 14#include <linux/input.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/platform_data/bfin_rotary.h>
15 17
16#include <asm/portmux.h> 18#include <asm/portmux.h>
17#include <asm/bfin_rotary.h>
18 19
19static const u16 per_cnt[] = { 20#define CNT_CONFIG_OFF 0 /* CNT Config Offset */
20 P_CNT_CUD, 21#define CNT_IMASK_OFF 4 /* CNT Interrupt Mask Offset */
21 P_CNT_CDG, 22#define CNT_STATUS_OFF 8 /* CNT Status Offset */
22 P_CNT_CZM, 23#define CNT_COMMAND_OFF 12 /* CNT Command Offset */
23 0 24#define CNT_DEBOUNCE_OFF 16 /* CNT Debounce Offset */
24}; 25#define CNT_COUNTER_OFF 20 /* CNT Counter Offset */
26#define CNT_MAX_OFF 24 /* CNT Maximum Count Offset */
27#define CNT_MIN_OFF 28 /* CNT Minimum Count Offset */
25 28
26struct bfin_rot { 29struct bfin_rot {
27 struct input_dev *input; 30 struct input_dev *input;
31 void __iomem *base;
28 int irq; 32 int irq;
29 unsigned int up_key; 33 unsigned int up_key;
30 unsigned int down_key; 34 unsigned int down_key;
31 unsigned int button_key; 35 unsigned int button_key;
32 unsigned int rel_code; 36 unsigned int rel_code;
37
38 unsigned short mode;
39 unsigned short debounce;
40
33 unsigned short cnt_config; 41 unsigned short cnt_config;
34 unsigned short cnt_imask; 42 unsigned short cnt_imask;
35 unsigned short cnt_debounce; 43 unsigned short cnt_debounce;
@@ -59,18 +67,17 @@ static void report_rotary_event(struct bfin_rot *rotary, int delta)
59 67
60static irqreturn_t bfin_rotary_isr(int irq, void *dev_id) 68static irqreturn_t bfin_rotary_isr(int irq, void *dev_id)
61{ 69{
62 struct platform_device *pdev = dev_id; 70 struct bfin_rot *rotary = dev_id;
63 struct bfin_rot *rotary = platform_get_drvdata(pdev);
64 int delta; 71 int delta;
65 72
66 switch (bfin_read_CNT_STATUS()) { 73 switch (readw(rotary->base + CNT_STATUS_OFF)) {
67 74
68 case ICII: 75 case ICII:
69 break; 76 break;
70 77
71 case UCII: 78 case UCII:
72 case DCII: 79 case DCII:
73 delta = bfin_read_CNT_COUNTER(); 80 delta = readl(rotary->base + CNT_COUNTER_OFF);
74 if (delta) 81 if (delta)
75 report_rotary_event(rotary, delta); 82 report_rotary_event(rotary, delta);
76 break; 83 break;
@@ -83,16 +90,52 @@ static irqreturn_t bfin_rotary_isr(int irq, void *dev_id)
83 break; 90 break;
84 } 91 }
85 92
86 bfin_write_CNT_COMMAND(W1LCNT_ZERO); /* Clear COUNTER */ 93 writew(W1LCNT_ZERO, rotary->base + CNT_COMMAND_OFF); /* Clear COUNTER */
87 bfin_write_CNT_STATUS(-1); /* Clear STATUS */ 94 writew(-1, rotary->base + CNT_STATUS_OFF); /* Clear STATUS */
88 95
89 return IRQ_HANDLED; 96 return IRQ_HANDLED;
90} 97}
91 98
99static int bfin_rotary_open(struct input_dev *input)
100{
101 struct bfin_rot *rotary = input_get_drvdata(input);
102 unsigned short val;
103
104 if (rotary->mode & ROT_DEBE)
105 writew(rotary->debounce & DPRESCALE,
106 rotary->base + CNT_DEBOUNCE_OFF);
107
108 writew(rotary->mode & ~CNTE, rotary->base + CNT_CONFIG_OFF);
109
110 val = UCIE | DCIE;
111 if (rotary->button_key)
112 val |= CZMIE;
113 writew(val, rotary->base + CNT_IMASK_OFF);
114
115 writew(rotary->mode | CNTE, rotary->base + CNT_CONFIG_OFF);
116
117 return 0;
118}
119
120static void bfin_rotary_close(struct input_dev *input)
121{
122 struct bfin_rot *rotary = input_get_drvdata(input);
123
124 writew(0, rotary->base + CNT_CONFIG_OFF);
125 writew(0, rotary->base + CNT_IMASK_OFF);
126}
127
128static void bfin_rotary_free_action(void *data)
129{
130 peripheral_free_list(data);
131}
132
92static int bfin_rotary_probe(struct platform_device *pdev) 133static int bfin_rotary_probe(struct platform_device *pdev)
93{ 134{
94 struct bfin_rotary_platform_data *pdata = dev_get_platdata(&pdev->dev); 135 struct device *dev = &pdev->dev;
136 const struct bfin_rotary_platform_data *pdata = dev_get_platdata(dev);
95 struct bfin_rot *rotary; 137 struct bfin_rot *rotary;
138 struct resource *res;
96 struct input_dev *input; 139 struct input_dev *input;
97 int error; 140 int error;
98 141
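
The probe rewrite leans on managed (devm_*) resources: the allocation, the
ioremap, the input device, and the IRQ are all released by the driver core
when probe fails or the device unbinds, which is why the out1/out2 unwind
labels and most of the remove path disappear below. The core of the
pattern:

	rotary = devm_kzalloc(dev, sizeof(*rotary), GFP_KERNEL);
	if (!rotary)
		return -ENOMEM;         /* nothing to unwind by hand */

	error = devm_request_irq(dev, irq, bfin_rotary_isr, 0,
				 dev_name(dev), rotary);
	if (error)
		return error;           /* devm frees rotary for us */
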
@@ -102,18 +145,37 @@ static int bfin_rotary_probe(struct platform_device *pdev)
102 return -EINVAL; 145 return -EINVAL;
103 } 146 }
104 147
105 error = peripheral_request_list(per_cnt, dev_name(&pdev->dev)); 148 if (pdata->pin_list) {
106 if (error) { 149 error = peripheral_request_list(pdata->pin_list,
107 dev_err(&pdev->dev, "requesting peripherals failed\n"); 150 dev_name(&pdev->dev));
108 return error; 151 if (error) {
152 dev_err(dev, "requesting peripherals failed: %d\n",
153 error);
154 return error;
155 }
156
157 error = devm_add_action(dev, bfin_rotary_free_action,
158 pdata->pin_list);
159 if (error) {
160 dev_err(dev, "setting cleanup action failed: %d\n",
161 error);
162 peripheral_free_list(pdata->pin_list);
163 return error;
164 }
109 } 165 }
110 166
111 rotary = kzalloc(sizeof(struct bfin_rot), GFP_KERNEL); 167 rotary = devm_kzalloc(dev, sizeof(struct bfin_rot), GFP_KERNEL);
112 input = input_allocate_device(); 168 if (!rotary)
113 if (!rotary || !input) { 169 return -ENOMEM;
114 error = -ENOMEM; 170
115 goto out1; 171 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
116 } 172 rotary->base = devm_ioremap_resource(dev, res);
173 if (IS_ERR(rotary->base))
174 return PTR_ERR(rotary->base);
175
176 input = devm_input_allocate_device(dev);
177 if (!input)
178 return -ENOMEM;
117 179
118 rotary->input = input; 180 rotary->input = input;
119 181
@@ -122,9 +184,8 @@ static int bfin_rotary_probe(struct platform_device *pdev)
122 rotary->button_key = pdata->rotary_button_key; 184 rotary->button_key = pdata->rotary_button_key;
123 rotary->rel_code = pdata->rotary_rel_code; 185 rotary->rel_code = pdata->rotary_rel_code;
124 186
125 error = rotary->irq = platform_get_irq(pdev, 0); 187 rotary->mode = pdata->mode;
126 if (error < 0) 188 rotary->debounce = pdata->debounce;
127 goto out1;
128 189
129 input->name = pdev->name; 190 input->name = pdev->name;
130 input->phys = "bfin-rotary/input0"; 191 input->phys = "bfin-rotary/input0";
@@ -137,6 +198,9 @@ static int bfin_rotary_probe(struct platform_device *pdev)
137 input->id.product = 0x0001; 198 input->id.product = 0x0001;
138 input->id.version = 0x0100; 199 input->id.version = 0x0100;
139 200
201 input->open = bfin_rotary_open;
202 input->close = bfin_rotary_close;
203
140 if (rotary->up_key) { 204 if (rotary->up_key) {
141 __set_bit(EV_KEY, input->evbit); 205 __set_bit(EV_KEY, input->evbit);
142 __set_bit(rotary->up_key, input->keybit); 206 __set_bit(rotary->up_key, input->keybit);
@@ -151,75 +215,43 @@ static int bfin_rotary_probe(struct platform_device *pdev)
151 __set_bit(rotary->button_key, input->keybit); 215 __set_bit(rotary->button_key, input->keybit);
152 } 216 }
153 217
154 error = request_irq(rotary->irq, bfin_rotary_isr, 218 /* Quiesce the device before requesting irq */
155 0, dev_name(&pdev->dev), pdev); 219 bfin_rotary_close(input);
220
221 rotary->irq = platform_get_irq(pdev, 0);
222 if (rotary->irq < 0) {
223 dev_err(dev, "No rotary IRQ specified\n");
224 return -ENOENT;
225 }
226
227 error = devm_request_irq(dev, rotary->irq, bfin_rotary_isr,
228 0, dev_name(dev), rotary);
156 if (error) { 229 if (error) {
157 dev_err(&pdev->dev, 230 dev_err(dev, "unable to claim irq %d; error %d\n",
158 "unable to claim irq %d; error %d\n",
159 rotary->irq, error); 231 rotary->irq, error);
160 goto out1; 232 return error;
161 } 233 }
162 234
163 error = input_register_device(input); 235 error = input_register_device(input);
164 if (error) { 236 if (error) {
165 dev_err(&pdev->dev, 237 dev_err(dev, "unable to register input device (%d)\n", error);
166 "unable to register input device (%d)\n", error); 238 return error;
167 goto out2;
168 } 239 }
169 240
170 if (pdata->rotary_button_key)
171 bfin_write_CNT_IMASK(CZMIE);
172
173 if (pdata->mode & ROT_DEBE)
174 bfin_write_CNT_DEBOUNCE(pdata->debounce & DPRESCALE);
175
176 if (pdata->mode)
177 bfin_write_CNT_CONFIG(bfin_read_CNT_CONFIG() |
178 (pdata->mode & ~CNTE));
179
180 bfin_write_CNT_IMASK(bfin_read_CNT_IMASK() | UCIE | DCIE);
181 bfin_write_CNT_CONFIG(bfin_read_CNT_CONFIG() | CNTE);
182
183 platform_set_drvdata(pdev, rotary); 241 platform_set_drvdata(pdev, rotary);
184 device_init_wakeup(&pdev->dev, 1); 242 device_init_wakeup(&pdev->dev, 1);
185 243
186 return 0; 244 return 0;
187
188out2:
189 free_irq(rotary->irq, pdev);
190out1:
191 input_free_device(input);
192 kfree(rotary);
193 peripheral_free_list(per_cnt);
194
195 return error;
196} 245}
197 246
198static int bfin_rotary_remove(struct platform_device *pdev) 247static int __maybe_unused bfin_rotary_suspend(struct device *dev)
199{
200 struct bfin_rot *rotary = platform_get_drvdata(pdev);
201
202 bfin_write_CNT_CONFIG(0);
203 bfin_write_CNT_IMASK(0);
204
205 free_irq(rotary->irq, pdev);
206 input_unregister_device(rotary->input);
207 peripheral_free_list(per_cnt);
208
209 kfree(rotary);
210
211 return 0;
212}
213
214#ifdef CONFIG_PM
215static int bfin_rotary_suspend(struct device *dev)
216{ 248{
217 struct platform_device *pdev = to_platform_device(dev); 249 struct platform_device *pdev = to_platform_device(dev);
218 struct bfin_rot *rotary = platform_get_drvdata(pdev); 250 struct bfin_rot *rotary = platform_get_drvdata(pdev);
219 251
220 rotary->cnt_config = bfin_read_CNT_CONFIG(); 252 rotary->cnt_config = readw(rotary->base + CNT_CONFIG_OFF);
221 rotary->cnt_imask = bfin_read_CNT_IMASK(); 253 rotary->cnt_imask = readw(rotary->base + CNT_IMASK_OFF);
222 rotary->cnt_debounce = bfin_read_CNT_DEBOUNCE(); 254 rotary->cnt_debounce = readw(rotary->base + CNT_DEBOUNCE_OFF);
223 255
224 if (device_may_wakeup(&pdev->dev)) 256 if (device_may_wakeup(&pdev->dev))
225 enable_irq_wake(rotary->irq); 257 enable_irq_wake(rotary->irq);
@@ -227,38 +259,32 @@ static int bfin_rotary_suspend(struct device *dev)
227 return 0; 259 return 0;
228} 260}
229 261
230static int bfin_rotary_resume(struct device *dev) 262static int __maybe_unused bfin_rotary_resume(struct device *dev)
231{ 263{
232 struct platform_device *pdev = to_platform_device(dev); 264 struct platform_device *pdev = to_platform_device(dev);
233 struct bfin_rot *rotary = platform_get_drvdata(pdev); 265 struct bfin_rot *rotary = platform_get_drvdata(pdev);
234 266
235 bfin_write_CNT_DEBOUNCE(rotary->cnt_debounce); 267 writew(rotary->cnt_debounce, rotary->base + CNT_DEBOUNCE_OFF);
236 bfin_write_CNT_IMASK(rotary->cnt_imask); 268 writew(rotary->cnt_imask, rotary->base + CNT_IMASK_OFF);
237 bfin_write_CNT_CONFIG(rotary->cnt_config & ~CNTE); 269 writew(rotary->cnt_config & ~CNTE, rotary->base + CNT_CONFIG_OFF);
238 270
239 if (device_may_wakeup(&pdev->dev)) 271 if (device_may_wakeup(&pdev->dev))
240 disable_irq_wake(rotary->irq); 272 disable_irq_wake(rotary->irq);
241 273
242 if (rotary->cnt_config & CNTE) 274 if (rotary->cnt_config & CNTE)
243 bfin_write_CNT_CONFIG(rotary->cnt_config); 275 writew(rotary->cnt_config, rotary->base + CNT_CONFIG_OFF);
244 276
245 return 0; 277 return 0;
246} 278}
247 279
248static const struct dev_pm_ops bfin_rotary_pm_ops = { 280static SIMPLE_DEV_PM_OPS(bfin_rotary_pm_ops,
249 .suspend = bfin_rotary_suspend, 281 bfin_rotary_suspend, bfin_rotary_resume);
250 .resume = bfin_rotary_resume,
251};
252#endif
253 282
254static struct platform_driver bfin_rotary_device_driver = { 283static struct platform_driver bfin_rotary_device_driver = {
255 .probe = bfin_rotary_probe, 284 .probe = bfin_rotary_probe,
256 .remove = bfin_rotary_remove,
257 .driver = { 285 .driver = {
258 .name = "bfin-rotary", 286 .name = "bfin-rotary",
259#ifdef CONFIG_PM
260 .pm = &bfin_rotary_pm_ops, 287 .pm = &bfin_rotary_pm_ops,
261#endif
262 }, 288 },
263}; 289};
264module_platform_driver(bfin_rotary_device_driver); 290module_platform_driver(bfin_rotary_device_driver);
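
The bfin_rotary conversion above is a textbook switch to managed (devm_*) resources: the private struct, the MMIO mapping, and the IRQ are all tied to the device lifetime, which is why the out1/out2 unwind labels and the entire bfin_rotary_remove() callback disappear. A minimal sketch of the pattern, assuming a hypothetical "foo" platform device (only the devm_* calls mirror the diff):

/*
 * Managed-resource probe: everything acquired with devm_* is released
 * automatically, in reverse order, when probe fails or the device is
 * unbound - no remove() callback required.
 */
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct foo_priv {
	void __iomem *base;
	int irq;
};

static irqreturn_t foo_isr(int irq, void *dev_id)
{
	/* read/ack hardware state via dev_id (struct foo_priv *) here */
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct foo_priv *priv;
	struct resource *res;
	int error;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	error = devm_request_irq(dev, priv->irq, foo_isr, 0,
				 dev_name(dev), priv);
	if (error)
		return error;

	platform_set_drvdata(pdev, priv);
	return 0;
}

Because devm releases resources in reverse order of registration, the diff can also pair peripheral_request_list() with a devm_add_action() cleanup instead of freeing the pins in a remove() path.
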
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index 79cc0f79896f..e8e010a85484 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -195,7 +195,7 @@ static int soc_button_probe(struct platform_device *pdev)
195 195
196static struct soc_button_info soc_button_PNP0C40[] = { 196static struct soc_button_info soc_button_PNP0C40[] = {
197 { "power", 0, EV_KEY, KEY_POWER, false, true }, 197 { "power", 0, EV_KEY, KEY_POWER, false, true },
198 { "home", 1, EV_KEY, KEY_HOME, false, true }, 198 { "home", 1, EV_KEY, KEY_LEFTMETA, false, true },
199 { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false }, 199 { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false },
200 { "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false }, 200 { "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false },
201 { "rotation_lock", 4, EV_SW, SW_ROTATE_LOCK, false, false }, 201 { "rotation_lock", 4, EV_SW, SW_ROTATE_LOCK, false, false },
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index f205b8be2ce4..d28726a0ef85 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -99,36 +99,58 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
99#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */ 99#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */
100#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with 100#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with
101 6-byte ALPS packet */ 101 6-byte ALPS packet */
102#define ALPS_IS_RUSHMORE 0x100 /* device is a rushmore */
103#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */ 102#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */
104 103
105static const struct alps_model_info alps_model_data[] = { 104static const struct alps_model_info alps_model_data[] = {
106 { { 0x32, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Satellite Pro M10 */ 105 { { 0x32, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } }, /* Toshiba Satellite Pro M10 */
107 { { 0x33, 0x02, 0x0a }, 0x00, ALPS_PROTO_V1, 0x88, 0xf8, 0 }, /* UMAX-530T */ 106 { { 0x33, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V1, 0x88, 0xf8, 0 } }, /* UMAX-530T */
108 { { 0x53, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, 107 { { 0x53, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
109 { { 0x53, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, 108 { { 0x53, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
110 { { 0x60, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, /* HP ze1115 */ 109 { { 0x60, 0x03, 0xc8 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } }, /* HP ze1115 */
111 { { 0x63, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, 110 { { 0x63, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
112 { { 0x63, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, 111 { { 0x63, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
113 { { 0x63, 0x02, 0x28 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Fujitsu Siemens S6010 */ 112 { { 0x63, 0x02, 0x28 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 } }, /* Fujitsu Siemens S6010 */
114 { { 0x63, 0x02, 0x3c }, 0x00, ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL }, /* Toshiba Satellite S2400-103 */ 113 { { 0x63, 0x02, 0x3c }, 0x00, { ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL } }, /* Toshiba Satellite S2400-103 */
115 { { 0x63, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 }, /* NEC Versa L320 */ 114 { { 0x63, 0x02, 0x50 }, 0x00, { ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 } }, /* NEC Versa L320 */
116 { { 0x63, 0x02, 0x64 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, 115 { { 0x63, 0x02, 0x64 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
117 { { 0x63, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D800 */ 116 { { 0x63, 0x03, 0xc8 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } }, /* Dell Latitude D800 */
118 { { 0x73, 0x00, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT }, /* ThinkPad R61 8918-5QG */ 117 { { 0x73, 0x00, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT } }, /* ThinkPad R61 8918-5QG */
119 { { 0x73, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, 118 { { 0x73, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
120 { { 0x73, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Ahtec Laptop */ 119 { { 0x73, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 } }, /* Ahtec Laptop */
121 { { 0x20, 0x02, 0x0e }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */ 120
122 { { 0x22, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, 121 /*
123 { { 0x22, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */ 122 * XXX This entry is suspicious. First byte has zero lower nibble,
123 * which is what a normal mouse would report. Also, the value 0x0e
124 * isn't valid per PS/2 spec.
125 */
126 { { 0x20, 0x02, 0x0e }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },
127
128 { { 0x22, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },
129 { { 0x22, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT } }, /* Dell Latitude D600 */
124 /* Dell Latitude E5500, E6400, E6500, Precision M4400 */ 130 /* Dell Latitude E5500, E6400, E6500, Precision M4400 */
125 { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, 131 { { 0x62, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xcf, 0xcf,
126 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, 132 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED } },
127 { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V6, 0xff, 0xff, ALPS_DUALPOINT }, /* Dell XT2 */ 133 { { 0x73, 0x00, 0x14 }, 0x00, { ALPS_PROTO_V6, 0xff, 0xff, ALPS_DUALPOINT } }, /* Dell XT2 */
128 { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ 134 { { 0x73, 0x02, 0x50 }, 0x00, { ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS } }, /* Dell Vostro 1400 */
129 { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, 135 { { 0x52, 0x01, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xff, 0xff,
130 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ 136 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED } }, /* Toshiba Tecra A11-11L */
131 { { 0x73, 0x02, 0x64 }, 0x8a, ALPS_PROTO_V4, 0x8f, 0x8f, 0 }, 137 { { 0x73, 0x02, 0x64 }, 0x8a, { ALPS_PROTO_V4, 0x8f, 0x8f, 0 } },
138};
139
140static const struct alps_protocol_info alps_v3_protocol_data = {
141 ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT
142};
143
144static const struct alps_protocol_info alps_v3_rushmore_data = {
145 ALPS_PROTO_V3_RUSHMORE, 0x8f, 0x8f, ALPS_DUALPOINT
146};
147
148static const struct alps_protocol_info alps_v5_protocol_data = {
149 ALPS_PROTO_V5, 0xc8, 0xd8, 0
150};
151
152static const struct alps_protocol_info alps_v7_protocol_data = {
153 ALPS_PROTO_V7, 0x48, 0x48, ALPS_DUALPOINT
132}; 154};
133 155
134static void alps_set_abs_params_st(struct alps_data *priv, 156static void alps_set_abs_params_st(struct alps_data *priv,
@@ -136,12 +158,6 @@ static void alps_set_abs_params_st(struct alps_data *priv,
136static void alps_set_abs_params_mt(struct alps_data *priv, 158static void alps_set_abs_params_mt(struct alps_data *priv,
137 struct input_dev *dev1); 159 struct input_dev *dev1);
138 160
139/*
140 * XXX - this entry is suspicious. First byte has zero lower nibble,
141 * which is what a normal mouse would report. Also, the value 0x0e
142 * isn't valid per PS/2 spec.
143 */
144
145/* Packet formats are described in Documentation/input/alps.txt */ 161/* Packet formats are described in Documentation/input/alps.txt */
146 162
147static bool alps_is_valid_first_byte(struct alps_data *priv, 163static bool alps_is_valid_first_byte(struct alps_data *priv,
@@ -150,8 +166,7 @@ static bool alps_is_valid_first_byte(struct alps_data *priv,
150 return (data & priv->mask0) == priv->byte0; 166 return (data & priv->mask0) == priv->byte0;
151} 167}
152 168
153static void alps_report_buttons(struct psmouse *psmouse, 169static void alps_report_buttons(struct input_dev *dev1, struct input_dev *dev2,
154 struct input_dev *dev1, struct input_dev *dev2,
155 int left, int right, int middle) 170 int left, int right, int middle)
156{ 171{
157 struct input_dev *dev; 172 struct input_dev *dev;
@@ -161,20 +176,21 @@ static void alps_report_buttons(struct psmouse *psmouse,
161 * other device (dev2) then this event should be also 176 * other device (dev2) then this event should be also
162 * sent through that device. 177 * sent through that device.
163 */ 178 */
164 dev = test_bit(BTN_LEFT, dev2->key) ? dev2 : dev1; 179 dev = (dev2 && test_bit(BTN_LEFT, dev2->key)) ? dev2 : dev1;
165 input_report_key(dev, BTN_LEFT, left); 180 input_report_key(dev, BTN_LEFT, left);
166 181
167 dev = test_bit(BTN_RIGHT, dev2->key) ? dev2 : dev1; 182 dev = (dev2 && test_bit(BTN_RIGHT, dev2->key)) ? dev2 : dev1;
168 input_report_key(dev, BTN_RIGHT, right); 183 input_report_key(dev, BTN_RIGHT, right);
169 184
170 dev = test_bit(BTN_MIDDLE, dev2->key) ? dev2 : dev1; 185 dev = (dev2 && test_bit(BTN_MIDDLE, dev2->key)) ? dev2 : dev1;
171 input_report_key(dev, BTN_MIDDLE, middle); 186 input_report_key(dev, BTN_MIDDLE, middle);
172 187
173 /* 188 /*
174 * Sync the _other_ device now, we'll do the first 189 * Sync the _other_ device now, we'll do the first
175 * device later once we report the rest of the events. 190 * device later once we report the rest of the events.
176 */ 191 */
177 input_sync(dev2); 192 if (dev2)
193 input_sync(dev2);
178} 194}
179 195
180static void alps_process_packet_v1_v2(struct psmouse *psmouse) 196static void alps_process_packet_v1_v2(struct psmouse *psmouse)
@@ -221,13 +237,13 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
221 input_report_rel(dev2, REL_X, (x > 383 ? (x - 768) : x)); 237 input_report_rel(dev2, REL_X, (x > 383 ? (x - 768) : x));
222 input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y)); 238 input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y));
223 239
224 alps_report_buttons(psmouse, dev2, dev, left, right, middle); 240 alps_report_buttons(dev2, dev, left, right, middle);
225 241
226 input_sync(dev2); 242 input_sync(dev2);
227 return; 243 return;
228 } 244 }
229 245
230 alps_report_buttons(psmouse, dev, dev2, left, right, middle); 246 alps_report_buttons(dev, dev2, left, right, middle);
231 247
232 /* Convert hardware tap to a reasonable Z value */ 248 /* Convert hardware tap to a reasonable Z value */
233 if (ges && !fin) 249 if (ges && !fin)
@@ -412,7 +428,7 @@ static int alps_process_bitmap(struct alps_data *priv,
412 (2 * (priv->y_bits - 1)); 428 (2 * (priv->y_bits - 1));
413 429
414 /* y-bitmap order is reversed, except on rushmore */ 430 /* y-bitmap order is reversed, except on rushmore */
415 if (!(priv->flags & ALPS_IS_RUSHMORE)) { 431 if (priv->proto_version != ALPS_PROTO_V3_RUSHMORE) {
416 fields->mt[0].y = priv->y_max - fields->mt[0].y; 432 fields->mt[0].y = priv->y_max - fields->mt[0].y;
417 fields->mt[1].y = priv->y_max - fields->mt[1].y; 433 fields->mt[1].y = priv->y_max - fields->mt[1].y;
418 } 434 }
@@ -648,7 +664,8 @@ static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse)
648 */ 664 */
649 if (f->is_mp) { 665 if (f->is_mp) {
650 fingers = f->fingers; 666 fingers = f->fingers;
651 if (priv->proto_version == ALPS_PROTO_V3) { 667 if (priv->proto_version == ALPS_PROTO_V3 ||
668 priv->proto_version == ALPS_PROTO_V3_RUSHMORE) {
652 if (alps_process_bitmap(priv, f) == 0) 669 if (alps_process_bitmap(priv, f) == 0)
653 fingers = 0; /* Use st data */ 670 fingers = 0; /* Use st data */
654 671
@@ -892,34 +909,6 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
892 unsigned char *pkt, 909 unsigned char *pkt,
893 unsigned char pkt_id) 910 unsigned char pkt_id)
894{ 911{
895 /*
896 * packet-fmt b7 b6 b5 b4 b3 b2 b1 b0
897 * Byte0 TWO & MULTI L 1 R M 1 Y0-2 Y0-1 Y0-0
898 * Byte0 NEW L 1 X1-5 1 1 Y0-2 Y0-1 Y0-0
899 * Byte1 Y0-10 Y0-9 Y0-8 Y0-7 Y0-6 Y0-5 Y0-4 Y0-3
900 * Byte2 X0-11 1 X0-10 X0-9 X0-8 X0-7 X0-6 X0-5
901 * Byte3 X1-11 1 X0-4 X0-3 1 X0-2 X0-1 X0-0
902 * Byte4 TWO X1-10 TWO X1-9 X1-8 X1-7 X1-6 X1-5 X1-4
903 * Byte4 MULTI X1-10 TWO X1-9 X1-8 X1-7 X1-6 Y1-5 1
904 * Byte4 NEW X1-10 TWO X1-9 X1-8 X1-7 X1-6 0 0
905 * Byte5 TWO & NEW Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 Y1-5 Y1-4
906 * Byte5 MULTI Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 F-1 F-0
907 * L: Left button
908 * R / M: Non-clickpads: Right / Middle button
909 * Clickpads: When > 2 fingers are down, and some fingers
910 * are in the button area, then the 2 coordinates reported
911 * are for fingers outside the button area and these report
912 * extra fingers being present in the right / left button
913 * area. Note these fingers are not added to the F field!
914 * so if a TWO packet is received and R = 1 then there are
915 * 3 fingers down, etc.
916 * TWO: 1: Two touches present, byte 0/4/5 are in TWO fmt
917 * 0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt
918 * otherwise byte 0 bit 4 must be set and byte 0/4/5 are
919 * in NEW fmt
920 * F: Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
921 */
922
923 mt[0].x = ((pkt[2] & 0x80) << 4); 912 mt[0].x = ((pkt[2] & 0x80) << 4);
924 mt[0].x |= ((pkt[2] & 0x3F) << 5); 913 mt[0].x |= ((pkt[2] & 0x3F) << 5);
925 mt[0].x |= ((pkt[3] & 0x30) >> 1); 914 mt[0].x |= ((pkt[3] & 0x30) >> 1);
@@ -1044,17 +1033,6 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
1044 return; 1033 return;
1045 } 1034 }
1046 1035
1047 /*
1048 * b7 b6 b5 b4 b3 b2 b1 b0
1049 * Byte0 0 1 0 0 1 0 0 0
1050 * Byte1 1 1 * * 1 M R L
1051 * Byte2 X7 1 X5 X4 X3 X2 X1 X0
1052 * Byte3 Z6 1 Y6 X6 1 Y2 Y1 Y0
1053 * Byte4 Y7 0 Y5 Y4 Y3 1 1 0
1054 * Byte5 T&P 0 Z5 Z4 Z3 Z2 Z1 Z0
1055 * M / R / L: Middle / Right / Left button
1056 */
1057
1058 x = ((packet[2] & 0xbf)) | ((packet[3] & 0x10) << 2); 1036 x = ((packet[2] & 0xbf)) | ((packet[3] & 0x10) << 2);
1059 y = (packet[3] & 0x07) | (packet[4] & 0xb8) | 1037 y = (packet[3] & 0x07) | (packet[4] & 0xb8) |
1060 ((packet[3] & 0x20) << 1); 1038 ((packet[3] & 0x20) << 1);
@@ -1107,23 +1085,89 @@ static void alps_process_packet_v7(struct psmouse *psmouse)
1107 alps_process_touchpad_packet_v7(psmouse); 1085 alps_process_touchpad_packet_v7(psmouse);
1108} 1086}
1109 1087
1110static void alps_report_bare_ps2_packet(struct psmouse *psmouse, 1088static DEFINE_MUTEX(alps_mutex);
1089
1090static void alps_register_bare_ps2_mouse(struct work_struct *work)
1091{
1092 struct alps_data *priv =
1093 container_of(work, struct alps_data, dev3_register_work.work);
1094 struct psmouse *psmouse = priv->psmouse;
1095 struct input_dev *dev3;
1096 int error = 0;
1097
1098 mutex_lock(&alps_mutex);
1099
1100 if (priv->dev3)
1101 goto out;
1102
1103 dev3 = input_allocate_device();
1104 if (!dev3) {
1105 psmouse_err(psmouse, "failed to allocate secondary device\n");
1106 error = -ENOMEM;
1107 goto out;
1108 }
1109
1110 snprintf(priv->phys3, sizeof(priv->phys3), "%s/%s",
1111 psmouse->ps2dev.serio->phys,
1112 (priv->dev2 ? "input2" : "input1"));
1113 dev3->phys = priv->phys3;
1114
1115 /*
1116 * format of input device name is: "protocol vendor name"
1117 * see function psmouse_switch_protocol() in psmouse-base.c
1118 */
1119 dev3->name = "PS/2 ALPS Mouse";
1120
1121 dev3->id.bustype = BUS_I8042;
1122 dev3->id.vendor = 0x0002;
1123 dev3->id.product = PSMOUSE_PS2;
1124 dev3->id.version = 0x0000;
1125 dev3->dev.parent = &psmouse->ps2dev.serio->dev;
1126
1127 input_set_capability(dev3, EV_REL, REL_X);
1128 input_set_capability(dev3, EV_REL, REL_Y);
1129 input_set_capability(dev3, EV_KEY, BTN_LEFT);
1130 input_set_capability(dev3, EV_KEY, BTN_RIGHT);
1131 input_set_capability(dev3, EV_KEY, BTN_MIDDLE);
1132
1133 __set_bit(INPUT_PROP_POINTER, dev3->propbit);
1134
1135 error = input_register_device(dev3);
1136 if (error) {
1137 psmouse_err(psmouse,
1138 "failed to register secondary device: %d\n",
1139 error);
1140 input_free_device(dev3);
1141 goto out;
1142 }
1143
1144 priv->dev3 = dev3;
1145
1146out:
1147 /*
1148 * Save the error code so that we can detect that we
1149 * already tried to create the device.
1150 */
1151 if (error)
1152 priv->dev3 = ERR_PTR(error);
1153
1154 mutex_unlock(&alps_mutex);
1155}
1156
1157static void alps_report_bare_ps2_packet(struct input_dev *dev,
1111 unsigned char packet[], 1158 unsigned char packet[],
1112 bool report_buttons) 1159 bool report_buttons)
1113{ 1160{
1114 struct alps_data *priv = psmouse->private;
1115 struct input_dev *dev2 = priv->dev2;
1116
1117 if (report_buttons) 1161 if (report_buttons)
1118 alps_report_buttons(psmouse, dev2, psmouse->dev, 1162 alps_report_buttons(dev, NULL,
1119 packet[0] & 1, packet[0] & 2, packet[0] & 4); 1163 packet[0] & 1, packet[0] & 2, packet[0] & 4);
1120 1164
1121 input_report_rel(dev2, REL_X, 1165 input_report_rel(dev, REL_X,
1122 packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0); 1166 packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0);
1123 input_report_rel(dev2, REL_Y, 1167 input_report_rel(dev, REL_Y,
1124 packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0); 1168 packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0);
1125 1169
1126 input_sync(dev2); 1170 input_sync(dev);
1127} 1171}
1128 1172
1129static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse) 1173static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
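
alps_register_bare_ps2_mouse() above runs from a workqueue because the first bare PS/2 packet is seen in alps_process_byte(), i.e. in serio interrupt context, where the sleeping input_allocate_device()/input_register_device() calls are off limits. A condensed sketch of the lazy-registration idiom, assuming the alps_data fields added in this diff (the example_* names are made up):

#include <linux/err.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(example_mutex);

static void example_register_work(struct work_struct *work)
{
	struct alps_data *priv =
		container_of(work, struct alps_data, dev3_register_work.work);
	struct input_dev *dev3;
	int error = 0;

	mutex_lock(&example_mutex);

	if (priv->dev3)			/* already registered or failed */
		goto out;

	dev3 = input_allocate_device();	/* may sleep - fine in a work item */
	if (!dev3) {
		error = -ENOMEM;
		goto out;
	}
	dev3->name = "PS/2 ALPS Mouse";

	error = input_register_device(dev3);
	if (error) {
		input_free_device(dev3);
		goto out;
	}

	priv->dev3 = dev3;
out:
	if (error)
		priv->dev3 = ERR_PTR(error);	/* cache the failure */
	mutex_unlock(&example_mutex);
}

Caching a failed attempt as an ERR_PTR is what lets the hot path in alps_process_byte() below test IS_ERR_OR_NULL(priv->dev3) cheaply instead of re-queuing the work on every packet.
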
@@ -1188,8 +1232,8 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
1188 * de-synchronization. 1232 * de-synchronization.
1189 */ 1233 */
1190 1234
1191 alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3], 1235 alps_report_bare_ps2_packet(priv->dev2,
1192 false); 1236 &psmouse->packet[3], false);
1193 1237
1194 /* 1238 /*
1195 * Continue with the standard ALPS protocol handling, 1239 * Continue with the standard ALPS protocol handling,
@@ -1245,9 +1289,18 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
1245 * properly we only do this if the device is fully synchronized. 1289 * properly we only do this if the device is fully synchronized.
1246 */ 1290 */
1247 if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) { 1291 if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
1292
1293 /* Register dev3 mouse if we received PS/2 packet first time */
1294 if (unlikely(!priv->dev3))
1295 psmouse_queue_work(psmouse,
1296 &priv->dev3_register_work, 0);
1297
1248 if (psmouse->pktcnt == 3) { 1298 if (psmouse->pktcnt == 3) {
1249 alps_report_bare_ps2_packet(psmouse, psmouse->packet, 1299 /* Once dev3 mouse device is registered report data */
1250 true); 1300 if (likely(!IS_ERR_OR_NULL(priv->dev3)))
1301 alps_report_bare_ps2_packet(priv->dev3,
1302 psmouse->packet,
1303 true);
1251 return PSMOUSE_FULL_PACKET; 1304 return PSMOUSE_FULL_PACKET;
1252 } 1305 }
1253 return PSMOUSE_GOOD_DATA; 1306 return PSMOUSE_GOOD_DATA;
@@ -1275,7 +1328,7 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
1275 psmouse->pktcnt - 1, 1328 psmouse->pktcnt - 1,
1276 psmouse->packet[psmouse->pktcnt - 1]); 1329 psmouse->packet[psmouse->pktcnt - 1]);
1277 1330
1278 if (priv->proto_version == ALPS_PROTO_V3 && 1331 if (priv->proto_version == ALPS_PROTO_V3_RUSHMORE &&
1279 psmouse->pktcnt == psmouse->pktsize) { 1332 psmouse->pktcnt == psmouse->pktsize) {
1280 /* 1333 /*
1281 * Some Dell boxes, such as Latitude E6440 or E7440 1334 * Some Dell boxes, such as Latitude E6440 or E7440
@@ -1780,7 +1833,7 @@ static int alps_setup_trackstick_v3(struct psmouse *psmouse, int reg_base)
1780 * all. 1833 * all.
1781 */ 1834 */
1782 if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_SETSCALE21, param)) { 1835 if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_SETSCALE21, param)) {
1783 psmouse_warn(psmouse, "trackstick E7 report failed\n"); 1836 psmouse_warn(psmouse, "Failed to initialize trackstick (E7 report failed)\n");
1784 ret = -ENODEV; 1837 ret = -ENODEV;
1785 } else { 1838 } else {
1786 psmouse_dbg(psmouse, "trackstick E7 report: %3ph\n", param); 1839 psmouse_dbg(psmouse, "trackstick E7 report: %3ph\n", param);
@@ -1945,8 +1998,6 @@ static int alps_hw_init_rushmore_v3(struct psmouse *psmouse)
1945 ALPS_REG_BASE_RUSHMORE); 1998 ALPS_REG_BASE_RUSHMORE);
1946 if (reg_val == -EIO) 1999 if (reg_val == -EIO)
1947 goto error; 2000 goto error;
1948 if (reg_val == -ENODEV)
1949 priv->flags &= ~ALPS_DUALPOINT;
1950 } 2001 }
1951 2002
1952 if (alps_enter_command_mode(psmouse) || 2003 if (alps_enter_command_mode(psmouse) ||
@@ -2162,11 +2213,18 @@ error:
2162 return ret; 2213 return ret;
2163} 2214}
2164 2215
2165static void alps_set_defaults(struct alps_data *priv) 2216static int alps_set_protocol(struct psmouse *psmouse,
2217 struct alps_data *priv,
2218 const struct alps_protocol_info *protocol)
2166{ 2219{
2167 priv->byte0 = 0x8f; 2220 psmouse->private = priv;
2168 priv->mask0 = 0x8f; 2221
2169 priv->flags = ALPS_DUALPOINT; 2222 setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse);
2223
2224 priv->proto_version = protocol->version;
2225 priv->byte0 = protocol->byte0;
2226 priv->mask0 = protocol->mask0;
2227 priv->flags = protocol->flags;
2170 2228
2171 priv->x_max = 2000; 2229 priv->x_max = 2000;
2172 priv->y_max = 1400; 2230 priv->y_max = 1400;
@@ -2182,6 +2240,7 @@ static void alps_set_defaults(struct alps_data *priv)
2182 priv->x_max = 1023; 2240 priv->x_max = 1023;
2183 priv->y_max = 767; 2241 priv->y_max = 767;
2184 break; 2242 break;
2243
2185 case ALPS_PROTO_V3: 2244 case ALPS_PROTO_V3:
2186 priv->hw_init = alps_hw_init_v3; 2245 priv->hw_init = alps_hw_init_v3;
2187 priv->process_packet = alps_process_packet_v3; 2246 priv->process_packet = alps_process_packet_v3;
@@ -2190,6 +2249,23 @@ static void alps_set_defaults(struct alps_data *priv)
2190 priv->nibble_commands = alps_v3_nibble_commands; 2249 priv->nibble_commands = alps_v3_nibble_commands;
2191 priv->addr_command = PSMOUSE_CMD_RESET_WRAP; 2250 priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
2192 break; 2251 break;
2252
2253 case ALPS_PROTO_V3_RUSHMORE:
2254 priv->hw_init = alps_hw_init_rushmore_v3;
2255 priv->process_packet = alps_process_packet_v3;
2256 priv->set_abs_params = alps_set_abs_params_mt;
2257 priv->decode_fields = alps_decode_rushmore;
2258 priv->nibble_commands = alps_v3_nibble_commands;
2259 priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
2260 priv->x_bits = 16;
2261 priv->y_bits = 12;
2262
2263 if (alps_probe_trackstick_v3(psmouse,
2264 ALPS_REG_BASE_RUSHMORE) < 0)
2265 priv->flags &= ~ALPS_DUALPOINT;
2266
2267 break;
2268
2193 case ALPS_PROTO_V4: 2269 case ALPS_PROTO_V4:
2194 priv->hw_init = alps_hw_init_v4; 2270 priv->hw_init = alps_hw_init_v4;
2195 priv->process_packet = alps_process_packet_v4; 2271 priv->process_packet = alps_process_packet_v4;
@@ -2197,6 +2273,7 @@ static void alps_set_defaults(struct alps_data *priv)
2197 priv->nibble_commands = alps_v4_nibble_commands; 2273 priv->nibble_commands = alps_v4_nibble_commands;
2198 priv->addr_command = PSMOUSE_CMD_DISABLE; 2274 priv->addr_command = PSMOUSE_CMD_DISABLE;
2199 break; 2275 break;
2276
2200 case ALPS_PROTO_V5: 2277 case ALPS_PROTO_V5:
2201 priv->hw_init = alps_hw_init_dolphin_v1; 2278 priv->hw_init = alps_hw_init_dolphin_v1;
2202 priv->process_packet = alps_process_touchpad_packet_v3_v5; 2279 priv->process_packet = alps_process_touchpad_packet_v3_v5;
@@ -2204,14 +2281,12 @@ static void alps_set_defaults(struct alps_data *priv)
2204 priv->set_abs_params = alps_set_abs_params_mt; 2281 priv->set_abs_params = alps_set_abs_params_mt;
2205 priv->nibble_commands = alps_v3_nibble_commands; 2282 priv->nibble_commands = alps_v3_nibble_commands;
2206 priv->addr_command = PSMOUSE_CMD_RESET_WRAP; 2283 priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
2207 priv->byte0 = 0xc8;
2208 priv->mask0 = 0xd8;
2209 priv->flags = 0;
2210 priv->x_max = 1360; 2284 priv->x_max = 1360;
2211 priv->y_max = 660; 2285 priv->y_max = 660;
2212 priv->x_bits = 23; 2286 priv->x_bits = 23;
2213 priv->y_bits = 12; 2287 priv->y_bits = 12;
2214 break; 2288 break;
2289
2215 case ALPS_PROTO_V6: 2290 case ALPS_PROTO_V6:
2216 priv->hw_init = alps_hw_init_v6; 2291 priv->hw_init = alps_hw_init_v6;
2217 priv->process_packet = alps_process_packet_v6; 2292 priv->process_packet = alps_process_packet_v6;
@@ -2220,6 +2295,7 @@ static void alps_set_defaults(struct alps_data *priv)
2220 priv->x_max = 2047; 2295 priv->x_max = 2047;
2221 priv->y_max = 1535; 2296 priv->y_max = 1535;
2222 break; 2297 break;
2298
2223 case ALPS_PROTO_V7: 2299 case ALPS_PROTO_V7:
2224 priv->hw_init = alps_hw_init_v7; 2300 priv->hw_init = alps_hw_init_v7;
2225 priv->process_packet = alps_process_packet_v7; 2301 priv->process_packet = alps_process_packet_v7;
@@ -2227,19 +2303,21 @@ static void alps_set_defaults(struct alps_data *priv)
2227 priv->set_abs_params = alps_set_abs_params_mt; 2303 priv->set_abs_params = alps_set_abs_params_mt;
2228 priv->nibble_commands = alps_v3_nibble_commands; 2304 priv->nibble_commands = alps_v3_nibble_commands;
2229 priv->addr_command = PSMOUSE_CMD_RESET_WRAP; 2305 priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
2230 priv->x_max = 0xfff; 2306
2231 priv->y_max = 0x7ff; 2307 if (alps_dolphin_get_device_area(psmouse, priv))
2232 priv->byte0 = 0x48; 2308 return -EIO;
2233 priv->mask0 = 0x48;
2234 2309
2235 if (priv->fw_ver[1] != 0xba) 2310 if (priv->fw_ver[1] != 0xba)
2236 priv->flags |= ALPS_BUTTONPAD; 2311 priv->flags |= ALPS_BUTTONPAD;
2312
2237 break; 2313 break;
2238 } 2314 }
2315
2316 return 0;
2239} 2317}
2240 2318
2241static int alps_match_table(struct psmouse *psmouse, struct alps_data *priv, 2319static const struct alps_protocol_info *alps_match_table(unsigned char *e7,
2242 unsigned char *e7, unsigned char *ec) 2320 unsigned char *ec)
2243{ 2321{
2244 const struct alps_model_info *model; 2322 const struct alps_model_info *model;
2245 int i; 2323 int i;
@@ -2251,23 +2329,18 @@ static int alps_match_table(struct psmouse *psmouse, struct alps_data *priv,
2251 (!model->command_mode_resp || 2329 (!model->command_mode_resp ||
2252 model->command_mode_resp == ec[2])) { 2330 model->command_mode_resp == ec[2])) {
2253 2331
2254 priv->proto_version = model->proto_version; 2332 return &model->protocol_info;
2255 alps_set_defaults(priv);
2256
2257 priv->flags = model->flags;
2258 priv->byte0 = model->byte0;
2259 priv->mask0 = model->mask0;
2260
2261 return 0;
2262 } 2333 }
2263 } 2334 }
2264 2335
2265 return -EINVAL; 2336 return NULL;
2266} 2337}
2267 2338
2268static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) 2339static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
2269{ 2340{
2341 const struct alps_protocol_info *protocol;
2270 unsigned char e6[4], e7[4], ec[4]; 2342 unsigned char e6[4], e7[4], ec[4];
2343 int error;
2271 2344
2272 /* 2345 /*
2273 * First try "E6 report". 2346 * First try "E6 report".
@@ -2293,54 +2366,35 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
2293 alps_exit_command_mode(psmouse)) 2366 alps_exit_command_mode(psmouse))
2294 return -EIO; 2367 return -EIO;
2295 2368
2296 /* Save the Firmware version */ 2369 protocol = alps_match_table(e7, ec);
2297 memcpy(priv->fw_ver, ec, 3); 2370 if (!protocol) {
2298 2371 if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 &&
2299 if (alps_match_table(psmouse, priv, e7, ec) == 0) { 2372 ec[0] == 0x73 && (ec[1] == 0x01 || ec[1] == 0x02)) {
2300 return 0; 2373 protocol = &alps_v5_protocol_data;
2301 } else if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 && 2374 } else if (ec[0] == 0x88 &&
2302 ec[0] == 0x73 && (ec[1] == 0x01 || ec[1] == 0x02)) { 2375 ((ec[1] & 0xf0) == 0xb0 || (ec[1] & 0xf0) == 0xc0)) {
2303 priv->proto_version = ALPS_PROTO_V5; 2376 protocol = &alps_v7_protocol_data;
2304 alps_set_defaults(priv); 2377 } else if (ec[0] == 0x88 && ec[1] == 0x08) {
2305 if (alps_dolphin_get_device_area(psmouse, priv)) 2378 protocol = &alps_v3_rushmore_data;
2306 return -EIO; 2379 } else if (ec[0] == 0x88 && ec[1] == 0x07 &&
2307 else 2380 ec[2] >= 0x90 && ec[2] <= 0x9d) {
2308 return 0; 2381 protocol = &alps_v3_protocol_data;
2309 } else if (ec[0] == 0x88 && 2382 } else {
2310 ((ec[1] & 0xf0) == 0xb0 || (ec[1] & 0xf0) == 0xc0)) { 2383 psmouse_dbg(psmouse,
2311 priv->proto_version = ALPS_PROTO_V7; 2384 "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec);
2312 alps_set_defaults(priv); 2385 return -EINVAL;
2313 2386 }
2314 return 0;
2315 } else if (ec[0] == 0x88 && ec[1] == 0x08) {
2316 priv->proto_version = ALPS_PROTO_V3;
2317 alps_set_defaults(priv);
2318
2319 priv->hw_init = alps_hw_init_rushmore_v3;
2320 priv->decode_fields = alps_decode_rushmore;
2321 priv->x_bits = 16;
2322 priv->y_bits = 12;
2323 priv->flags |= ALPS_IS_RUSHMORE;
2324
2325 /* hack to make addr_command, nibble_command available */
2326 psmouse->private = priv;
2327
2328 if (alps_probe_trackstick_v3(psmouse, ALPS_REG_BASE_RUSHMORE))
2329 priv->flags &= ~ALPS_DUALPOINT;
2330
2331 return 0;
2332 } else if (ec[0] == 0x88 && ec[1] == 0x07 &&
2333 ec[2] >= 0x90 && ec[2] <= 0x9d) {
2334 priv->proto_version = ALPS_PROTO_V3;
2335 alps_set_defaults(priv);
2336
2337 return 0;
2338 } 2387 }
2339 2388
2340 psmouse_dbg(psmouse, 2389 if (priv) {
2341 "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec); 2390 /* Save the Firmware version */
2391 memcpy(priv->fw_ver, ec, 3);
2392 error = alps_set_protocol(psmouse, priv, protocol);
2393 if (error)
2394 return error;
2395 }
2342 2396
2343 return -EINVAL; 2397 return 0;
2344} 2398}
2345 2399
2346static int alps_reconnect(struct psmouse *psmouse) 2400static int alps_reconnect(struct psmouse *psmouse)
@@ -2361,7 +2415,10 @@ static void alps_disconnect(struct psmouse *psmouse)
2361 2415
2362 psmouse_reset(psmouse); 2416 psmouse_reset(psmouse);
2363 del_timer_sync(&priv->timer); 2417 del_timer_sync(&priv->timer);
2364 input_unregister_device(priv->dev2); 2418 if (priv->dev2)
2419 input_unregister_device(priv->dev2);
2420 if (!IS_ERR_OR_NULL(priv->dev3))
2421 input_unregister_device(priv->dev3);
2365 kfree(priv); 2422 kfree(priv);
2366} 2423}
2367 2424
@@ -2394,25 +2451,12 @@ static void alps_set_abs_params_mt(struct alps_data *priv,
2394 2451
2395int alps_init(struct psmouse *psmouse) 2452int alps_init(struct psmouse *psmouse)
2396{ 2453{
2397 struct alps_data *priv; 2454 struct alps_data *priv = psmouse->private;
2398 struct input_dev *dev1 = psmouse->dev, *dev2; 2455 struct input_dev *dev1 = psmouse->dev;
2399 2456 int error;
2400 priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL);
2401 dev2 = input_allocate_device();
2402 if (!priv || !dev2)
2403 goto init_fail;
2404
2405 priv->dev2 = dev2;
2406 setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse);
2407
2408 psmouse->private = priv;
2409
2410 psmouse_reset(psmouse);
2411
2412 if (alps_identify(psmouse, priv) < 0)
2413 goto init_fail;
2414 2457
2415 if (priv->hw_init(psmouse)) 2458 error = priv->hw_init(psmouse);
2459 if (error)
2416 goto init_fail; 2460 goto init_fail;
2417 2461
2418 /* 2462 /*
@@ -2462,36 +2506,57 @@ int alps_init(struct psmouse *psmouse)
2462 } 2506 }
2463 2507
2464 if (priv->flags & ALPS_DUALPOINT) { 2508 if (priv->flags & ALPS_DUALPOINT) {
2509 struct input_dev *dev2;
2510
2511 dev2 = input_allocate_device();
2512 if (!dev2) {
2513 psmouse_err(psmouse,
2514 "failed to allocate trackstick device\n");
2515 error = -ENOMEM;
2516 goto init_fail;
2517 }
2518
2519 snprintf(priv->phys2, sizeof(priv->phys2), "%s/input1",
2520 psmouse->ps2dev.serio->phys);
2521 dev2->phys = priv->phys2;
2522
2465 /* 2523 /*
2466 * format of input device name is: "protocol vendor name" 2524 * format of input device name is: "protocol vendor name"
2467 * see function psmouse_switch_protocol() in psmouse-base.c 2525 * see function psmouse_switch_protocol() in psmouse-base.c
2468 */ 2526 */
2469 dev2->name = "AlpsPS/2 ALPS DualPoint Stick"; 2527 dev2->name = "AlpsPS/2 ALPS DualPoint Stick";
2528
2529 dev2->id.bustype = BUS_I8042;
2530 dev2->id.vendor = 0x0002;
2470 dev2->id.product = PSMOUSE_ALPS; 2531 dev2->id.product = PSMOUSE_ALPS;
2471 dev2->id.version = priv->proto_version; 2532 dev2->id.version = priv->proto_version;
2472 } else { 2533 dev2->dev.parent = &psmouse->ps2dev.serio->dev;
2473 dev2->name = "PS/2 ALPS Mouse";
2474 dev2->id.product = PSMOUSE_PS2;
2475 dev2->id.version = 0x0000;
2476 }
2477 2534
2478 snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys); 2535 input_set_capability(dev2, EV_REL, REL_X);
2479 dev2->phys = priv->phys; 2536 input_set_capability(dev2, EV_REL, REL_Y);
2480 dev2->id.bustype = BUS_I8042; 2537 input_set_capability(dev2, EV_KEY, BTN_LEFT);
2481 dev2->id.vendor = 0x0002; 2538 input_set_capability(dev2, EV_KEY, BTN_RIGHT);
2482 dev2->dev.parent = &psmouse->ps2dev.serio->dev; 2539 input_set_capability(dev2, EV_KEY, BTN_MIDDLE);
2483 2540
2484 dev2->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); 2541 __set_bit(INPUT_PROP_POINTER, dev2->propbit);
2485 dev2->relbit[BIT_WORD(REL_X)] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
2486 dev2->keybit[BIT_WORD(BTN_LEFT)] =
2487 BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);
2488
2489 __set_bit(INPUT_PROP_POINTER, dev2->propbit);
2490 if (priv->flags & ALPS_DUALPOINT)
2491 __set_bit(INPUT_PROP_POINTING_STICK, dev2->propbit); 2542 __set_bit(INPUT_PROP_POINTING_STICK, dev2->propbit);
2492 2543
2493 if (input_register_device(priv->dev2)) 2544 error = input_register_device(dev2);
2494 goto init_fail; 2545 if (error) {
2546 psmouse_err(psmouse,
2547 "failed to register trackstick device: %d\n",
2548 error);
2549 input_free_device(dev2);
2550 goto init_fail;
2551 }
2552
2553 priv->dev2 = dev2;
2554 }
2555
2556 priv->psmouse = psmouse;
2557
2558 INIT_DELAYED_WORK(&priv->dev3_register_work,
2559 alps_register_bare_ps2_mouse);
2495 2560
2496 psmouse->protocol_handler = alps_process_byte; 2561 psmouse->protocol_handler = alps_process_byte;
2497 psmouse->poll = alps_poll; 2562 psmouse->poll = alps_poll;
@@ -2509,25 +2574,56 @@ int alps_init(struct psmouse *psmouse)
2509 2574
2510init_fail: 2575init_fail:
2511 psmouse_reset(psmouse); 2576 psmouse_reset(psmouse);
2512 input_free_device(dev2); 2577 /*
2513 kfree(priv); 2578 * Even though we did not allocate psmouse->private we do free
2579 * it here.
2580 */
2581 kfree(psmouse->private);
2514 psmouse->private = NULL; 2582 psmouse->private = NULL;
2515 return -1; 2583 return error;
2516} 2584}
2517 2585
2518int alps_detect(struct psmouse *psmouse, bool set_properties) 2586int alps_detect(struct psmouse *psmouse, bool set_properties)
2519{ 2587{
2520 struct alps_data dummy; 2588 struct alps_data *priv;
2589 int error;
2521 2590
2522 if (alps_identify(psmouse, &dummy) < 0) 2591 error = alps_identify(psmouse, NULL);
2523 return -1; 2592 if (error)
2593 return error;
2594
2595 /*
2596 * Reset the device to make sure it is fully operational:
2597 * on some laptops, like certain Dell Latitudes, we may
2598 * fail to properly detect presence of trackstick if device
2599 * has not been reset.
2600 */
2601 psmouse_reset(psmouse);
2602
2603 priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL);
2604 if (!priv)
2605 return -ENOMEM;
2606
2607 error = alps_identify(psmouse, priv);
2608 if (error)
2609 return error;
2524 2610
2525 if (set_properties) { 2611 if (set_properties) {
2526 psmouse->vendor = "ALPS"; 2612 psmouse->vendor = "ALPS";
2527 psmouse->name = dummy.flags & ALPS_DUALPOINT ? 2613 psmouse->name = priv->flags & ALPS_DUALPOINT ?
2528 "DualPoint TouchPad" : "GlidePoint"; 2614 "DualPoint TouchPad" : "GlidePoint";
2529 psmouse->model = dummy.proto_version << 8; 2615 psmouse->model = priv->proto_version;
2616 } else {
2617 /*
2618 * Destroy alps_data structure we allocated earlier since
2619 * this was just a "trial run". Otherwise we'll keep it
2620 * to be used by alps_init() which has to be called if
2621 * we succeed and set_properties is true.
2622 */
2623 kfree(priv);
2624 psmouse->private = NULL;
2530 } 2625 }
2626
2531 return 0; 2627 return 0;
2532} 2628}
2533 2629
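
The alps.c rework above also changes how a model is bound to its protocol: alps_match_table() now returns a pointer into the model table instead of copying proto_version/byte0/mask0/flags into alps_data one field at a time, and alps_set_protocol() consumes that pointer. The same table-driven shape, reduced to a self-contained sketch with made-up "widget" names:

#include <stddef.h>
#include <string.h>

struct widget_protocol {
	unsigned short version;
	unsigned char byte0, mask0;
};

struct widget_model {
	unsigned char signature[3];
	struct widget_protocol protocol;
};

static const struct widget_model widget_models[] = {
	{ { 0x32, 0x02, 0x14 }, { 0x200, 0xf8, 0xf8 } },
	{ { 0x73, 0x02, 0x64 }, { 0x400, 0x8f, 0x8f } },
};

static const struct widget_protocol *
widget_match(const unsigned char sig[3])
{
	size_t i;

	for (i = 0; i < sizeof(widget_models) / sizeof(widget_models[0]); i++)
		if (!memcmp(widget_models[i].signature, sig, 3))
			return &widget_models[i].protocol;

	return NULL;	/* caller falls back to heuristics, as alps_identify() does */
}

Returning const descriptors keeps a single copy of each protocol definition (alps_v3_protocol_data and friends) shared between the table hits and the heuristic fallbacks in alps_identify().
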
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index 66240b47819a..02513c0502fc 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -14,13 +14,14 @@
14 14
15#include <linux/input/mt.h> 15#include <linux/input/mt.h>
16 16
17#define ALPS_PROTO_V1 1 17#define ALPS_PROTO_V1 0x100
18#define ALPS_PROTO_V2 2 18#define ALPS_PROTO_V2 0x200
19#define ALPS_PROTO_V3 3 19#define ALPS_PROTO_V3 0x300
20#define ALPS_PROTO_V4 4 20#define ALPS_PROTO_V3_RUSHMORE 0x310
21#define ALPS_PROTO_V5 5 21#define ALPS_PROTO_V4 0x400
22#define ALPS_PROTO_V6 6 22#define ALPS_PROTO_V5 0x500
23#define ALPS_PROTO_V7 7 /* t3btl t4s */ 23#define ALPS_PROTO_V6 0x600
24#define ALPS_PROTO_V7 0x700 /* t3btl t4s */
24 25
25#define MAX_TOUCHES 2 26#define MAX_TOUCHES 2
26 27
@@ -46,29 +47,37 @@ enum V7_PACKET_ID {
46}; 47};
47 48
48/** 49/**
50 * struct alps_protocol_info - information about protocol used by a device
51 * @version: Indicates V1/V2/V3/...
52 * @byte0: Helps figure out whether a position report packet matches the
53 * known format for this model. The first byte of the report, ANDed with
54 * mask0, should match byte0.
55 * @mask0: The mask used to check the first byte of the report.
56 * @flags: Additional device capabilities (passthrough port, trackstick, etc.).
57 */
58struct alps_protocol_info {
59 u16 version;
60 u8 byte0, mask0;
61 unsigned int flags;
62};
63
64/**
49 * struct alps_model_info - touchpad ID table 65 * struct alps_model_info - touchpad ID table
50 * @signature: E7 response string to match. 66 * @signature: E7 response string to match.
51 * @command_mode_resp: For V3/V4 touchpads, the final byte of the EC response 67 * @command_mode_resp: For V3/V4 touchpads, the final byte of the EC response
52 * (aka command mode response) identifies the firmware minor version. This 68 * (aka command mode response) identifies the firmware minor version. This
53 * can be used to distinguish different hardware models which are not 69 * can be used to distinguish different hardware models which are not
54 * uniquely identifiable through their E7 responses. 70 * uniquely identifiable through their E7 responses.
55 * @proto_version: Indicates V1/V2/V3/... 71 * @protocol_info: information about protocol used by the device.
56 * @byte0: Helps figure out whether a position report packet matches the
57 * known format for this model. The first byte of the report, ANDed with
58 * mask0, should match byte0.
59 * @mask0: The mask used to check the first byte of the report.
60 * @flags: Additional device capabilities (passthrough port, trackstick, etc.).
61 * 72 *
62 * Many (but not all) ALPS touchpads can be identified by looking at the 73 * Many (but not all) ALPS touchpads can be identified by looking at the
63 * values returned in the "E7 report" and/or the "EC report." This table 74 * values returned in the "E7 report" and/or the "EC report." This table
64 * lists a number of such touchpads. 75 * lists a number of such touchpads.
65 */ 76 */
66struct alps_model_info { 77struct alps_model_info {
67 unsigned char signature[3]; 78 u8 signature[3];
68 unsigned char command_mode_resp; 79 u8 command_mode_resp;
69 unsigned char proto_version; 80 struct alps_protocol_info protocol_info;
70 unsigned char byte0, mask0;
71 int flags;
72}; 81};
73 82
74/** 83/**
@@ -132,8 +141,12 @@ struct alps_fields {
132 141
133/** 142/**
134 * struct alps_data - private data structure for the ALPS driver 143 * struct alps_data - private data structure for the ALPS driver
135 * @dev2: "Relative" device used to report trackstick or mouse activity. 144 * @psmouse: Pointer to parent psmouse device
136 * @phys: Physical path for the relative device. 145 * @dev2: Trackstick device (can be NULL).
146 * @dev3: Generic PS/2 mouse (can be NULL, delayed registering).
147 * @phys2: Physical path for the trackstick device.
148 * @phys3: Physical path for the generic PS/2 mouse.
149 * @dev3_register_work: Delayed work for registering PS/2 mouse.
137 * @nibble_commands: Command mapping used for touchpad register accesses. 150 * @nibble_commands: Command mapping used for touchpad register accesses.
138 * @addr_command: Command used to tell the touchpad that a register address 151 * @addr_command: Command used to tell the touchpad that a register address
139 * follows. 152 * follows.
@@ -160,15 +173,19 @@ struct alps_fields {
160 * @timer: Timer for flushing out the final report packet in the stream. 173 * @timer: Timer for flushing out the final report packet in the stream.
161 */ 174 */
162struct alps_data { 175struct alps_data {
176 struct psmouse *psmouse;
163 struct input_dev *dev2; 177 struct input_dev *dev2;
164 char phys[32]; 178 struct input_dev *dev3;
179 char phys2[32];
180 char phys3[32];
181 struct delayed_work dev3_register_work;
165 182
166 /* these are autodetected when the device is identified */ 183 /* these are autodetected when the device is identified */
167 const struct alps_nibble_commands *nibble_commands; 184 const struct alps_nibble_commands *nibble_commands;
168 int addr_command; 185 int addr_command;
169 unsigned char proto_version; 186 u16 proto_version;
170 unsigned char byte0, mask0; 187 u8 byte0, mask0;
171 unsigned char fw_ver[3]; 188 u8 fw_ver[3];
172 int flags; 189 int flags;
173 int x_max; 190 int x_max;
174 int y_max; 191 int y_max;
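
The renumbering in alps.h gives each protocol a full 16-bit value spaced 0x100 apart, so a hardware variant can slot in between: ALPS_PROTO_V3_RUSHMORE (0x310) sits between V3 (0x300) and V4 (0x400), replacing the old ALPS_IS_RUSHMORE flag. It also explains the change in alps_detect() above, where psmouse->model is now priv->proto_version rather than proto_version << 8. A hypothetical helper (the driver itself compares exact values, so this macro is illustration only):

#define ALPS_PROTO_MAJOR(v)	((v) & 0xf00)

/* ALPS_PROTO_MAJOR(ALPS_PROTO_V3_RUSHMORE) == ALPS_PROTO_V3 */
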
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
index 9118a1861a45..28dcfc822bf6 100644
--- a/drivers/input/mouse/cypress_ps2.c
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -710,8 +710,3 @@ err_exit:
710 710
711 return -1; 711 return -1;
712} 712}
713
714bool cypress_supported(void)
715{
716 return true;
717}
diff --git a/drivers/input/mouse/cypress_ps2.h b/drivers/input/mouse/cypress_ps2.h
index 4720f21d2d70..81f68aaed7c8 100644
--- a/drivers/input/mouse/cypress_ps2.h
+++ b/drivers/input/mouse/cypress_ps2.h
@@ -172,7 +172,6 @@ struct cytp_data {
172#ifdef CONFIG_MOUSE_PS2_CYPRESS 172#ifdef CONFIG_MOUSE_PS2_CYPRESS
173int cypress_detect(struct psmouse *psmouse, bool set_properties); 173int cypress_detect(struct psmouse *psmouse, bool set_properties);
174int cypress_init(struct psmouse *psmouse); 174int cypress_init(struct psmouse *psmouse);
175bool cypress_supported(void);
176#else 175#else
177inline int cypress_detect(struct psmouse *psmouse, bool set_properties) 176inline int cypress_detect(struct psmouse *psmouse, bool set_properties)
178{ 177{
@@ -182,10 +181,6 @@ inline int cypress_init(struct psmouse *psmouse)
182{ 181{
183 return -ENOSYS; 182 return -ENOSYS;
184} 183}
185inline bool cypress_supported(void)
186{
187 return 0;
188}
189#endif /* CONFIG_MOUSE_PS2_CYPRESS */ 184#endif /* CONFIG_MOUSE_PS2_CYPRESS */
190 185
191#endif /* _CYPRESS_PS2_H */ 186#endif /* _CYPRESS_PS2_H */
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c
index fca38ba63bbe..757f78a94aec 100644
--- a/drivers/input/mouse/focaltech.c
+++ b/drivers/input/mouse/focaltech.c
@@ -424,11 +424,6 @@ fail:
424 return error; 424 return error;
425} 425}
426 426
427bool focaltech_supported(void)
428{
429 return true;
430}
431
432#else /* CONFIG_MOUSE_PS2_FOCALTECH */ 427#else /* CONFIG_MOUSE_PS2_FOCALTECH */
433 428
434int focaltech_init(struct psmouse *psmouse) 429int focaltech_init(struct psmouse *psmouse)
@@ -438,9 +433,4 @@ int focaltech_init(struct psmouse *psmouse)
438 return 0; 433 return 0;
439} 434}
440 435
441bool focaltech_supported(void)
442{
443 return false;
444}
445
446#endif /* CONFIG_MOUSE_PS2_FOCALTECH */ 436#endif /* CONFIG_MOUSE_PS2_FOCALTECH */
diff --git a/drivers/input/mouse/focaltech.h b/drivers/input/mouse/focaltech.h
index 71870a9b548a..ca61ebff373e 100644
--- a/drivers/input/mouse/focaltech.h
+++ b/drivers/input/mouse/focaltech.h
@@ -19,6 +19,5 @@
19 19
20int focaltech_detect(struct psmouse *psmouse, bool set_properties); 20int focaltech_detect(struct psmouse *psmouse, bool set_properties);
21int focaltech_init(struct psmouse *psmouse); 21int focaltech_init(struct psmouse *psmouse);
22bool focaltech_supported(void);
23 22
24#endif 23#endif
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 68469feda470..4ccd01d7a48d 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -727,7 +727,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
727 if (psmouse_do_detect(focaltech_detect, psmouse, set_properties) == 0) { 727 if (psmouse_do_detect(focaltech_detect, psmouse, set_properties) == 0) {
728 if (max_proto > PSMOUSE_IMEX) { 728 if (max_proto > PSMOUSE_IMEX) {
729 if (!set_properties || focaltech_init(psmouse) == 0) { 729 if (!set_properties || focaltech_init(psmouse) == 0) {
730 if (focaltech_supported()) 730 if (IS_ENABLED(CONFIG_MOUSE_PS2_FOCALTECH))
731 return PSMOUSE_FOCALTECH; 731 return PSMOUSE_FOCALTECH;
732 /* 732 /*
733 * Note that we need to also restrict 733 * Note that we need to also restrict
@@ -776,7 +776,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
776 * Try activating protocol, but check if support is enabled first, since 776 * Try activating protocol, but check if support is enabled first, since
777 * we try detecting Synaptics even when protocol is disabled. 777 * we try detecting Synaptics even when protocol is disabled.
778 */ 778 */
779 if (synaptics_supported() && 779 if (IS_ENABLED(CONFIG_MOUSE_PS2_SYNAPTICS) &&
780 (!set_properties || synaptics_init(psmouse) == 0)) { 780 (!set_properties || synaptics_init(psmouse) == 0)) {
781 return PSMOUSE_SYNAPTICS; 781 return PSMOUSE_SYNAPTICS;
782 } 782 }
@@ -801,7 +801,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
801 */ 801 */
802 if (max_proto > PSMOUSE_IMEX && 802 if (max_proto > PSMOUSE_IMEX &&
803 cypress_detect(psmouse, set_properties) == 0) { 803 cypress_detect(psmouse, set_properties) == 0) {
804 if (cypress_supported()) { 804 if (IS_ENABLED(CONFIG_MOUSE_PS2_CYPRESS)) {
805 if (cypress_init(psmouse) == 0) 805 if (cypress_init(psmouse) == 0)
806 return PSMOUSE_CYPRESS; 806 return PSMOUSE_CYPRESS;
807 807
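
These psmouse-base hunks, together with the cypress/focaltech/synaptics deletions around them, replace runtime *_supported() helpers with IS_ENABLED(CONFIG_...), which expands to the constant 1 when the option is y or m and to 0 otherwise. The check is resolved at compile time and the dead branch is discarded, while code in both branches still gets type-checked. A sketch of the idiom; try_cypress() is a made-up wrapper around the real cypress_init():

#include <linux/kconfig.h>

static int try_cypress(struct psmouse *psmouse)
{
	/* Constant-folded: with the option off, this is just 'return -ENOSYS;' */
	if (!IS_ENABLED(CONFIG_MOUSE_PS2_CYPRESS))
		return -ENOSYS;

	return cypress_init(psmouse);
}
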
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 7e705ee90b86..f2cceb6493a0 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1454,11 +1454,6 @@ int synaptics_init_relative(struct psmouse *psmouse)
1454 return __synaptics_init(psmouse, false); 1454 return __synaptics_init(psmouse, false);
1455} 1455}
1456 1456
1457bool synaptics_supported(void)
1458{
1459 return true;
1460}
1461
1462#else /* CONFIG_MOUSE_PS2_SYNAPTICS */ 1457#else /* CONFIG_MOUSE_PS2_SYNAPTICS */
1463 1458
1464void __init synaptics_module_init(void) 1459void __init synaptics_module_init(void)
@@ -1470,9 +1465,4 @@ int synaptics_init(struct psmouse *psmouse)
1470 return -ENOSYS; 1465 return -ENOSYS;
1471} 1466}
1472 1467
1473bool synaptics_supported(void)
1474{
1475 return false;
1476}
1477
1478#endif /* CONFIG_MOUSE_PS2_SYNAPTICS */ 1468#endif /* CONFIG_MOUSE_PS2_SYNAPTICS */
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 6faf9bb7c117..aedc3299b14e 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -175,6 +175,5 @@ int synaptics_detect(struct psmouse *psmouse, bool set_properties);
175int synaptics_init(struct psmouse *psmouse); 175int synaptics_init(struct psmouse *psmouse);
176int synaptics_init_relative(struct psmouse *psmouse); 176int synaptics_init_relative(struct psmouse *psmouse);
177void synaptics_reset(struct psmouse *psmouse); 177void synaptics_reset(struct psmouse *psmouse);
178bool synaptics_supported(void);
179 178
180#endif /* _SYNAPTICS_H */ 179#endif /* _SYNAPTICS_H */
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 1daa7ca04577..9acdc080e7ec 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -192,14 +192,6 @@ static bool gic_local_irq_is_routable(int intr)
192 } 192 }
193} 193}
194 194
195unsigned int gic_get_timer_pending(void)
196{
197 unsigned int vpe_pending;
198
199 vpe_pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
200 return vpe_pending & GIC_VPE_PEND_TIMER_MSK;
201}
202
203static void gic_bind_eic_interrupt(int irq, int set) 195static void gic_bind_eic_interrupt(int irq, int set)
204{ 196{
205 /* Convert irq vector # to hw int # */ 197 /* Convert irq vector # to hw int # */
diff --git a/drivers/isdn/hardware/mISDN/Kconfig b/drivers/isdn/hardware/mISDN/Kconfig
index b8611e3e5e74..09df54fc1fef 100644
--- a/drivers/isdn/hardware/mISDN/Kconfig
+++ b/drivers/isdn/hardware/mISDN/Kconfig
@@ -24,7 +24,7 @@ config MISDN_HFCMULTI
24 * HFC-E1 (E1 interface for 2Mbit ISDN) 24 * HFC-E1 (E1 interface for 2Mbit ISDN)
25 25
26config MISDN_HFCMULTI_8xx 26config MISDN_HFCMULTI_8xx
27 boolean "Support for XHFC embedded board in HFC multiport driver" 27 bool "Support for XHFC embedded board in HFC multiport driver"
28 depends on MISDN 28 depends on MISDN
29 depends on MISDN_HFCMULTI 29 depends on MISDN_HFCMULTI
30 depends on 8xx 30 depends on 8xx
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 3c92780bda09..ff48da61c94c 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1755,7 +1755,7 @@ init_card(struct hfc_pci *hc)
1755 enable_hwirq(hc); 1755 enable_hwirq(hc);
1756 spin_unlock_irqrestore(&hc->lock, flags); 1756 spin_unlock_irqrestore(&hc->lock, flags);
1757 /* Timeout 80ms */ 1757 /* Timeout 80ms */
1758 current->state = TASK_UNINTERRUPTIBLE; 1758 set_current_state(TASK_UNINTERRUPTIBLE);
1759 schedule_timeout((80 * HZ) / 1000); 1759 schedule_timeout((80 * HZ) / 1000);
1760 printk(KERN_INFO "HFC PCI: IRQ %d count %d\n", 1760 printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
1761 hc->irq, hc->irqcnt); 1761 hc->irq, hc->irqcnt);
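
The hfcpci change swaps a direct write to current->state for set_current_state(). The helper issues a memory barrier after setting the state, so a wake_up() racing with the assignment cannot leave the task sleeping forever; a plain store has no such guarantee. A sketch of the idiomatic timed sleep (msecs_to_jiffies(80) is equivalent to the hunk's (80 * HZ) / 1000):

#include <linux/jiffies.h>
#include <linux/sched.h>

static void wait_80ms(void)
{
	/* Barrier-paired state change, then sleep until the timeout. */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(80));
}
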
diff --git a/drivers/lguest/Makefile b/drivers/lguest/Makefile
index c4197503900e..16f52ee73994 100644
--- a/drivers/lguest/Makefile
+++ b/drivers/lguest/Makefile
@@ -1,6 +1,3 @@
1# Guest requires the device configuration and probing code.
2obj-$(CONFIG_LGUEST_GUEST) += lguest_device.o
3
4# Host requires the other files, which can be a module. 1# Host requires the other files, which can be a module.
5obj-$(CONFIG_LGUEST) += lg.o 2obj-$(CONFIG_LGUEST) += lg.o
6lg-y = core.o hypercalls.o page_tables.o interrupts_and_traps.o \ 3lg-y = core.o hypercalls.o page_tables.o interrupts_and_traps.o \
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 6590558d1d31..7dc93aa004c8 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -208,6 +208,14 @@ void __lgwrite(struct lg_cpu *cpu, unsigned long addr, const void *b,
208 */ 208 */
209int run_guest(struct lg_cpu *cpu, unsigned long __user *user) 209int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
210{ 210{
211 /* If the launcher asked for a register with LHREQ_GETREG */
212 if (cpu->reg_read) {
213 if (put_user(*cpu->reg_read, user))
214 return -EFAULT;
215 cpu->reg_read = NULL;
216 return sizeof(*cpu->reg_read);
217 }
218
211 /* We stop running once the Guest is dead. */ 219 /* We stop running once the Guest is dead. */
212 while (!cpu->lg->dead) { 220 while (!cpu->lg->dead) {
213 unsigned int irq; 221 unsigned int irq;
@@ -217,21 +225,12 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
217 if (cpu->hcall) 225 if (cpu->hcall)
218 do_hypercalls(cpu); 226 do_hypercalls(cpu);
219 227
220 /* 228 /* Do we have to tell the Launcher about a trap? */
221 * It's possible the Guest did a NOTIFY hypercall to the 229 if (cpu->pending.trap) {
222 * Launcher. 230 if (copy_to_user(user, &cpu->pending,
223 */ 231 sizeof(cpu->pending)))
224 if (cpu->pending_notify) { 232 return -EFAULT;
225 /* 233 return sizeof(cpu->pending);
 226 * Does it just need to write to a registered
227 * eventfd (ie. the appropriate virtqueue thread)?
228 */
229 if (!send_notify_to_eventfd(cpu)) {
230 /* OK, we tell the main Launcher. */
231 if (put_user(cpu->pending_notify, user))
232 return -EFAULT;
233 return sizeof(cpu->pending_notify);
234 }
235 } 234 }
236 235
237 /* 236 /*
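With this rework, run_guest() can hand the Launcher two kinds of payload through the read() buffer: a single register value queued by LHREQ_GETREG, or the whole pending structure when a trap must be forwarded. A sketch of the Launcher-side loop, under the assumption that lguest_fd is an open /dev/lguest descriptor and handle_trap() is a hypothetical helper (neither appears in this patch):

    for (;;) {
            struct lguest_pending pending;
            ssize_t len = read(lguest_fd, &pending, sizeof(pending));

            if (len < 0)
                    err(1, "running guest");
            /* A full-sized result is a trap for the Launcher to service. */
            if (len == sizeof(pending))
                    handle_trap(lguest_fd, &pending);
    }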
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index 83511eb0923d..1219af493c0f 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -117,9 +117,6 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
117 /* Similarly, this sets the halted flag for run_guest(). */ 117 /* Similarly, this sets the halted flag for run_guest(). */
118 cpu->halted = 1; 118 cpu->halted = 1;
119 break; 119 break;
120 case LHCALL_NOTIFY:
121 cpu->pending_notify = args->arg1;
122 break;
123 default: 120 default:
124 /* It should be an architecture-specific hypercall. */ 121 /* It should be an architecture-specific hypercall. */
125 if (lguest_arch_do_hcall(cpu, args)) 122 if (lguest_arch_do_hcall(cpu, args))
@@ -189,7 +186,7 @@ static void do_async_hcalls(struct lg_cpu *cpu)
189 * Stop doing hypercalls if they want to notify the Launcher: 186 * Stop doing hypercalls if they want to notify the Launcher:
190 * it needs to service this first. 187 * it needs to service this first.
191 */ 188 */
192 if (cpu->pending_notify) 189 if (cpu->pending.trap)
193 break; 190 break;
194 } 191 }
195} 192}
@@ -280,7 +277,7 @@ void do_hypercalls(struct lg_cpu *cpu)
280 * NOTIFY to the Launcher, we want to return now. Otherwise we do 277 * NOTIFY to the Launcher, we want to return now. Otherwise we do
281 * the hypercall. 278 * the hypercall.
282 */ 279 */
283 if (!cpu->pending_notify) { 280 if (!cpu->pending.trap) {
284 do_hcall(cpu, cpu->hcall); 281 do_hcall(cpu, cpu->hcall);
285 /* 282 /*
286 * Tricky point: we reset the hcall pointer to mark the 283 * Tricky point: we reset the hcall pointer to mark the
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 2eef40be4c04..307e8b39e7d1 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -50,7 +50,10 @@ struct lg_cpu {
50 /* Bitmap of what has changed: see CHANGED_* above. */ 50 /* Bitmap of what has changed: see CHANGED_* above. */
51 int changed; 51 int changed;
52 52
53 unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */ 53 /* Pending operation. */
54 struct lguest_pending pending;
55
56 unsigned long *reg_read; /* register from LHREQ_GETREG */
54 57
55 /* At end of a page shared mapped over lguest_pages in guest. */ 58 /* At end of a page shared mapped over lguest_pages in guest. */
56 unsigned long regs_page; 59 unsigned long regs_page;
@@ -78,24 +81,18 @@ struct lg_cpu {
78 struct lg_cpu_arch arch; 81 struct lg_cpu_arch arch;
79}; 82};
80 83
81struct lg_eventfd {
82 unsigned long addr;
83 struct eventfd_ctx *event;
84};
85
86struct lg_eventfd_map {
87 unsigned int num;
88 struct lg_eventfd map[];
89};
90
91/* The private info the thread maintains about the guest. */ 84/* The private info the thread maintains about the guest. */
92struct lguest { 85struct lguest {
93 struct lguest_data __user *lguest_data; 86 struct lguest_data __user *lguest_data;
94 struct lg_cpu cpus[NR_CPUS]; 87 struct lg_cpu cpus[NR_CPUS];
95 unsigned int nr_cpus; 88 unsigned int nr_cpus;
96 89
90 /* Valid guest memory pages must be < this. */
97 u32 pfn_limit; 91 u32 pfn_limit;
98 92
93 /* Device memory is >= pfn_limit and < device_limit. */
94 u32 device_limit;
95
99 /* 96 /*
100 * This provides the offset to the base of guest-physical memory in the 97 * This provides the offset to the base of guest-physical memory in the
101 * Launcher. 98 * Launcher.
@@ -110,8 +107,6 @@ struct lguest {
110 unsigned int stack_pages; 107 unsigned int stack_pages;
111 u32 tsc_khz; 108 u32 tsc_khz;
112 109
113 struct lg_eventfd_map *eventfds;
114
115 /* Dead? */ 110 /* Dead? */
116 const char *dead; 111 const char *dead;
117}; 112};
@@ -197,8 +192,10 @@ void guest_pagetable_flush_user(struct lg_cpu *cpu);
197void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir, 192void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir,
198 unsigned long vaddr, pte_t val); 193 unsigned long vaddr, pte_t val);
199void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages); 194void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages);
200bool demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode); 195bool demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode,
196 unsigned long *iomem);
201void pin_page(struct lg_cpu *cpu, unsigned long vaddr); 197void pin_page(struct lg_cpu *cpu, unsigned long vaddr);
198bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr);
202unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr); 199unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr);
203void page_table_guest_data_init(struct lg_cpu *cpu); 200void page_table_guest_data_init(struct lg_cpu *cpu);
204 201
@@ -210,6 +207,7 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu);
210int lguest_arch_init_hypercalls(struct lg_cpu *cpu); 207int lguest_arch_init_hypercalls(struct lg_cpu *cpu);
211int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args); 208int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args);
212void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start); 209void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start);
210unsigned long *lguest_arch_regptr(struct lg_cpu *cpu, size_t reg_off, bool any);
213 211
214/* <arch>/switcher.S: */ 212/* <arch>/switcher.S: */
215extern char start_switcher_text[], end_switcher_text[], switch_to_guest[]; 213extern char start_switcher_text[], end_switcher_text[], switch_to_guest[];
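The hunks above use struct lguest_pending without showing its definition, which lives in include/linux/lguest_launcher.h and is outside this diff. A shape consistent with every use in the patch (trap compared against 13/14, insn[] filled by copy_from_guest(), addr carrying the MMIO address) would be the following; the exact field widths are an assumption, not taken from this diff:

    struct lguest_pending {
            __u8 trap;      /* trap number: 13 (GP fault) or 14 (page fault) */
            __u8 insn[7];   /* leading bytes of the faulting instruction */
            __u32 addr;     /* guest-physical MMIO address, for trap 14 */
    };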
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
deleted file mode 100644
index 89088d6538fd..000000000000
--- a/drivers/lguest/lguest_device.c
+++ /dev/null
@@ -1,540 +0,0 @@
1/*P:050
2 * Lguest guests use a very simple method to describe devices. It's a
3 * series of device descriptors contained just above the top of normal Guest
4 * memory.
5 *
6 * We use the standard "virtio" device infrastructure, which provides us with a
7 * console, a network and a block driver. Each one expects some configuration
8 * information and a "virtqueue" or two to send and receive data.
9:*/
10#include <linux/init.h>
11#include <linux/bootmem.h>
12#include <linux/lguest_launcher.h>
13#include <linux/virtio.h>
14#include <linux/virtio_config.h>
15#include <linux/interrupt.h>
16#include <linux/virtio_ring.h>
17#include <linux/err.h>
18#include <linux/export.h>
19#include <linux/slab.h>
20#include <asm/io.h>
21#include <asm/paravirt.h>
22#include <asm/lguest_hcall.h>
23
24/* The pointer to our (page) of device descriptions. */
25static void *lguest_devices;
26
27/*
28 * For Guests, device memory can be used as normal memory, so we cast away the
29 * __iomem to quieten sparse.
30 */
31static inline void *lguest_map(unsigned long phys_addr, unsigned long pages)
32{
33 return (__force void *)ioremap_cache(phys_addr, PAGE_SIZE*pages);
34}
35
36static inline void lguest_unmap(void *addr)
37{
38 iounmap((__force void __iomem *)addr);
39}
40
41/*D:100
42 * Each lguest device is just a virtio device plus a pointer to its entry
43 * in the lguest_devices page.
44 */
45struct lguest_device {
46 struct virtio_device vdev;
47
48 /* The entry in the lguest_devices page for this device. */
49 struct lguest_device_desc *desc;
50};
51
52/*
53 * Since the virtio infrastructure hands us a pointer to the virtio_device all
54 * the time, it helps to have a curt macro to get a pointer to the struct
55 * lguest_device it's enclosed in.
56 */
57#define to_lgdev(vd) container_of(vd, struct lguest_device, vdev)
58
59/*D:130
60 * Device configurations
61 *
62 * The configuration information for a device consists of one or more
63 * virtqueues, a feature bitmap, and some configuration bytes. The
64 * configuration bytes don't really matter to us: the Launcher sets them up, and
65 * the driver will look at them during setup.
66 *
67 * A convenient routine to return the device's virtqueue config array:
68 * immediately after the descriptor.
69 */
70static struct lguest_vqconfig *lg_vq(const struct lguest_device_desc *desc)
71{
72 return (void *)(desc + 1);
73}
74
75/* The features come immediately after the virtqueues. */
76static u8 *lg_features(const struct lguest_device_desc *desc)
77{
78 return (void *)(lg_vq(desc) + desc->num_vq);
79}
80
81/* The config space comes after the two feature bitmasks. */
82static u8 *lg_config(const struct lguest_device_desc *desc)
83{
84 return lg_features(desc) + desc->feature_len * 2;
85}
86
87/* The total size of the config page used by this device (incl. desc) */
88static unsigned desc_size(const struct lguest_device_desc *desc)
89{
90 return sizeof(*desc)
91 + desc->num_vq * sizeof(struct lguest_vqconfig)
92 + desc->feature_len * 2
93 + desc->config_len;
94}
95
96/* This gets the device's feature bits. */
97static u64 lg_get_features(struct virtio_device *vdev)
98{
99 unsigned int i;
100 u32 features = 0;
101 struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
102 u8 *in_features = lg_features(desc);
103
104 /* We do this the slow but generic way. */
105 for (i = 0; i < min(desc->feature_len * 8, 32); i++)
106 if (in_features[i / 8] & (1 << (i % 8)))
107 features |= (1 << i);
108
109 return features;
110}
111
112/*
113 * To notify on reset or feature finalization, we (ab)use the NOTIFY
114 * hypercall, with the descriptor address of the device.
115 */
116static void status_notify(struct virtio_device *vdev)
117{
118 unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices;
119
120 hcall(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset, 0, 0, 0);
121}
122
123/*
124 * The virtio core takes the features the Host offers, and copies the ones
125 * supported by the driver into the vdev->features array. Once that's all
126 * sorted out, this routine is called so we can tell the Host which features we
127 * understand and accept.
128 */
129static int lg_finalize_features(struct virtio_device *vdev)
130{
131 unsigned int i, bits;
132 struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
133 /* Second half of bitmap is features we accept. */
134 u8 *out_features = lg_features(desc) + desc->feature_len;
135
136 /* Give virtio_ring a chance to accept features. */
137 vring_transport_features(vdev);
138
139 /* Make sure we don't have any features > 32 bits! */
140 BUG_ON((u32)vdev->features != vdev->features);
141
142 /*
143 * Since lguest is currently x86-only, we're little-endian. That
144 * means we could just memcpy. But it's not time critical, and in
145 * case someone copies this code, we do it the slow, obvious way.
146 */
147 memset(out_features, 0, desc->feature_len);
148 bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
149 for (i = 0; i < bits; i++) {
150 if (__virtio_test_bit(vdev, i))
151 out_features[i / 8] |= (1 << (i % 8));
152 }
153
154 /* Tell Host we've finished with this device's feature negotiation */
155 status_notify(vdev);
156
157 return 0;
158}
159
160/* Once they've found a field, getting a copy of it is easy. */
161static void lg_get(struct virtio_device *vdev, unsigned int offset,
162 void *buf, unsigned len)
163{
164 struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
165
166 /* Check they didn't ask for more than the length of the config! */
167 BUG_ON(offset + len > desc->config_len);
168 memcpy(buf, lg_config(desc) + offset, len);
169}
170
171/* Setting the contents is also trivial. */
172static void lg_set(struct virtio_device *vdev, unsigned int offset,
173 const void *buf, unsigned len)
174{
175 struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
176
177 /* Check they didn't ask for more than the length of the config! */
178 BUG_ON(offset + len > desc->config_len);
179 memcpy(lg_config(desc) + offset, buf, len);
180}
181
182/*
183 * The operations to get and set the status word just access the status field
184 * of the device descriptor.
185 */
186static u8 lg_get_status(struct virtio_device *vdev)
187{
188 return to_lgdev(vdev)->desc->status;
189}
190
191static void lg_set_status(struct virtio_device *vdev, u8 status)
192{
193 BUG_ON(!status);
194 to_lgdev(vdev)->desc->status = status;
195
196 /* Tell Host immediately if we failed. */
197 if (status & VIRTIO_CONFIG_S_FAILED)
198 status_notify(vdev);
199}
200
201static void lg_reset(struct virtio_device *vdev)
202{
203 /* 0 status means "reset" */
204 to_lgdev(vdev)->desc->status = 0;
205 status_notify(vdev);
206}
207
208/*
209 * Virtqueues
210 *
211 * The other piece of infrastructure virtio needs is a "virtqueue": a way of
212 * the Guest device registering buffers for the other side to read from or
213 * write into (ie. send and receive buffers). Each device can have multiple
214 * virtqueues: for example the console driver uses one queue for sending and
215 * another for receiving.
216 *
217 * Fortunately for us, a very fast shared-memory-plus-descriptors virtqueue
218 * already exists in virtio_ring.c. We just need to connect it up.
219 *
220 * We start with the information we need to keep about each virtqueue.
221 */
222
223/*D:140 This is the information we remember about each virtqueue. */
224struct lguest_vq_info {
225 /* A copy of the information contained in the device config. */
226 struct lguest_vqconfig config;
227
228 /* The address where we mapped the virtio ring, so we can unmap it. */
229 void *pages;
230};
231
232/*
233 * When the virtio_ring code wants to prod the Host, it calls us here and we
234 * make a hypercall. We hand the physical address of the virtqueue so the Host
235 * knows which virtqueue we're talking about.
236 */
237static bool lg_notify(struct virtqueue *vq)
238{
239 /*
240 * We store our virtqueue information in the "priv" pointer of the
241 * virtqueue structure.
242 */
243 struct lguest_vq_info *lvq = vq->priv;
244
245 hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0, 0);
246 return true;
247}
248
249/* An extern declaration inside a C file is bad form. Don't do it. */
250extern int lguest_setup_irq(unsigned int irq);
251
252/*
253 * This routine finds the Nth virtqueue described in the configuration of
254 * this device and sets it up.
255 *
256 * This is kind of an ugly duckling. It'd be nicer to have a standard
257 * representation of a virtqueue in the configuration space, but it seems that
258 * everyone wants to do it differently. The KVM coders want the Guest to
259 * allocate its own pages and tell the Host where they are, but for lguest it's
260 * simpler for the Host to simply tell us where the pages are.
261 */
262static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
263 unsigned index,
264 void (*callback)(struct virtqueue *vq),
265 const char *name)
266{
267 struct lguest_device *ldev = to_lgdev(vdev);
268 struct lguest_vq_info *lvq;
269 struct virtqueue *vq;
270 int err;
271
272 if (!name)
273 return NULL;
274
275 /* We must have this many virtqueues. */
276 if (index >= ldev->desc->num_vq)
277 return ERR_PTR(-ENOENT);
278
279 lvq = kmalloc(sizeof(*lvq), GFP_KERNEL);
280 if (!lvq)
281 return ERR_PTR(-ENOMEM);
282
283 /*
284 * Make a copy of the "struct lguest_vqconfig" entry, which sits after
285 * the descriptor. We need a copy because the config space might not
286 * be aligned correctly.
287 */
288 memcpy(&lvq->config, lg_vq(ldev->desc)+index, sizeof(lvq->config));
289
290 printk("Mapping virtqueue %i addr %lx\n", index,
291 (unsigned long)lvq->config.pfn << PAGE_SHIFT);
292 /* Figure out how many pages the ring will take, and map that memory */
293 lvq->pages = lguest_map((unsigned long)lvq->config.pfn << PAGE_SHIFT,
294 DIV_ROUND_UP(vring_size(lvq->config.num,
295 LGUEST_VRING_ALIGN),
296 PAGE_SIZE));
297 if (!lvq->pages) {
298 err = -ENOMEM;
299 goto free_lvq;
300 }
301
302 /*
303 * OK, tell virtio_ring.c to set up a virtqueue now we know its size
304 * and we've got a pointer to its pages. Note that we set weak_barriers
 305 * to 'true': the host is just a(nother) SMP CPU, so we only need inter-cpu
306 * barriers.
307 */
308 vq = vring_new_virtqueue(index, lvq->config.num, LGUEST_VRING_ALIGN, vdev,
309 true, lvq->pages, lg_notify, callback, name);
310 if (!vq) {
311 err = -ENOMEM;
312 goto unmap;
313 }
314
315 /* Make sure the interrupt is allocated. */
316 err = lguest_setup_irq(lvq->config.irq);
317 if (err)
318 goto destroy_vring;
319
320 /*
321 * Tell the interrupt for this virtqueue to go to the virtio_ring
322 * interrupt handler.
323 *
324 * FIXME: We used to have a flag for the Host to tell us we could use
325 * the interrupt as a source of randomness: it'd be nice to have that
326 * back.
327 */
328 err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED,
329 dev_name(&vdev->dev), vq);
330 if (err)
331 goto free_desc;
332
333 /*
 334 * Last of all we hook up our "struct lguest_vq_info" to the
335 * virtqueue's priv pointer.
336 */
337 vq->priv = lvq;
338 return vq;
339
340free_desc:
341 irq_free_desc(lvq->config.irq);
342destroy_vring:
343 vring_del_virtqueue(vq);
344unmap:
345 lguest_unmap(lvq->pages);
346free_lvq:
347 kfree(lvq);
348 return ERR_PTR(err);
349}
350/*:*/
351
352/* Cleaning up a virtqueue is easy */
353static void lg_del_vq(struct virtqueue *vq)
354{
355 struct lguest_vq_info *lvq = vq->priv;
356
357 /* Release the interrupt */
358 free_irq(lvq->config.irq, vq);
359 /* Tell virtio_ring.c to free the virtqueue. */
360 vring_del_virtqueue(vq);
361 /* Unmap the pages containing the ring. */
362 lguest_unmap(lvq->pages);
363 /* Free our own queue information. */
364 kfree(lvq);
365}
366
367static void lg_del_vqs(struct virtio_device *vdev)
368{
369 struct virtqueue *vq, *n;
370
371 list_for_each_entry_safe(vq, n, &vdev->vqs, list)
372 lg_del_vq(vq);
373}
374
375static int lg_find_vqs(struct virtio_device *vdev, unsigned nvqs,
376 struct virtqueue *vqs[],
377 vq_callback_t *callbacks[],
378 const char *names[])
379{
380 struct lguest_device *ldev = to_lgdev(vdev);
381 int i;
382
383 /* We must have this many virtqueues. */
384 if (nvqs > ldev->desc->num_vq)
385 return -ENOENT;
386
387 for (i = 0; i < nvqs; ++i) {
388 vqs[i] = lg_find_vq(vdev, i, callbacks[i], names[i]);
389 if (IS_ERR(vqs[i]))
390 goto error;
391 }
392 return 0;
393
394error:
395 lg_del_vqs(vdev);
396 return PTR_ERR(vqs[i]);
397}
398
399static const char *lg_bus_name(struct virtio_device *vdev)
400{
401 return "";
402}
403
404/* The ops structure which hooks everything together. */
405static const struct virtio_config_ops lguest_config_ops = {
406 .get_features = lg_get_features,
407 .finalize_features = lg_finalize_features,
408 .get = lg_get,
409 .set = lg_set,
410 .get_status = lg_get_status,
411 .set_status = lg_set_status,
412 .reset = lg_reset,
413 .find_vqs = lg_find_vqs,
414 .del_vqs = lg_del_vqs,
415 .bus_name = lg_bus_name,
416};
417
418/*
419 * The root device for the lguest virtio devices. This makes them appear as
420 * /sys/devices/lguest/0,1,2 not /sys/devices/0,1,2.
421 */
422static struct device *lguest_root;
423
424/*D:120
425 * This is the core of the lguest bus: actually adding a new device.
426 * It's a separate function because it's neater that way, and because an
427 * earlier version of the code supported hotplug and unplug. They were removed
428 * early on because they were never used.
429 *
430 * As Andrew Tridgell says, "Untested code is buggy code".
431 *
432 * It's worth reading this carefully: we start with a pointer to the new device
433 * descriptor in the "lguest_devices" page, and the offset into the device
434 * descriptor page so we can uniquely identify it if things go badly wrong.
435 */
436static void add_lguest_device(struct lguest_device_desc *d,
437 unsigned int offset)
438{
439 struct lguest_device *ldev;
440
441 /* Start with zeroed memory; Linux's device layer counts on it. */
442 ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
443 if (!ldev) {
444 printk(KERN_EMERG "Cannot allocate lguest dev %u type %u\n",
445 offset, d->type);
446 return;
447 }
448
 449 /* This device's parent is the lguest/ dir. */
450 ldev->vdev.dev.parent = lguest_root;
451 /*
452 * The device type comes straight from the descriptor. There's also a
453 * device vendor field in the virtio_device struct, which we leave as
454 * 0.
455 */
456 ldev->vdev.id.device = d->type;
457 /*
458 * We have a simple set of routines for querying the device's
459 * configuration information and setting its status.
460 */
461 ldev->vdev.config = &lguest_config_ops;
462 /* And we remember the device's descriptor for lguest_config_ops. */
463 ldev->desc = d;
464
465 /*
466 * register_virtio_device() sets up the generic fields for the struct
467 * virtio_device and calls device_register(). This makes the bus
468 * infrastructure look for a matching driver.
469 */
470 if (register_virtio_device(&ldev->vdev) != 0) {
471 printk(KERN_ERR "Failed to register lguest dev %u type %u\n",
472 offset, d->type);
473 kfree(ldev);
474 }
475}
476
477/*D:110
478 * scan_devices() simply iterates through the device page. The type 0 is
479 * reserved to mean "end of devices".
480 */
481static void scan_devices(void)
482{
483 unsigned int i;
484 struct lguest_device_desc *d;
485
486 /* We start at the page beginning, and skip over each entry. */
487 for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
488 d = lguest_devices + i;
489
490 /* Once we hit a zero, stop. */
491 if (d->type == 0)
492 break;
493
494 printk("Device at %i has size %u\n", i, desc_size(d));
495 add_lguest_device(d, i);
496 }
497}
498
499/*D:105
500 * Fairly early in boot, lguest_devices_init() is called to set up the
501 * lguest device infrastructure. We check that we are a Guest by checking
502 * pv_info.name: there are other ways of checking, but this seems most
503 * obvious to me.
504 *
505 * So we can access the "struct lguest_device_desc"s easily, we map that memory
506 * and store the pointer in the global "lguest_devices". Then we register a
507 * root device from which all our devices will hang (this seems to be the
508 * correct sysfs incantation).
509 *
510 * Finally we call scan_devices() which adds all the devices found in the
511 * lguest_devices page.
512 */
513static int __init lguest_devices_init(void)
514{
515 if (strcmp(pv_info.name, "lguest") != 0)
516 return 0;
517
518 lguest_root = root_device_register("lguest");
519 if (IS_ERR(lguest_root))
520 panic("Could not register lguest root");
521
522 /* Devices are in a single page above top of "normal" mem */
523 lguest_devices = lguest_map(max_pfn<<PAGE_SHIFT, 1);
524
525 scan_devices();
526 return 0;
527}
528/* We do this after core stuff, but before the drivers. */
529postcore_initcall(lguest_devices_init);
530
531/*D:150
532 * At this point in the journey we used to now wade through the lguest
533 * devices themselves: net, block and console. Since they're all now virtio
534 * devices rather than lguest-specific, I've decided to ignore them. Mostly,
535 * they're kind of boring. But this does mean you'll never experience the
536 * thrill of reading the forbidden love scene buried deep in the block driver.
537 *
538 * "make Launcher" beckons, where we answer questions like "Where do Guests
539 * come from?", and "What do you do when someone asks for optimization?".
540 */
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index 4263f4cc8c55..c4c6113eb9a6 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -2,175 +2,62 @@
2 * launcher controls and communicates with the Guest. For example, 2 * launcher controls and communicates with the Guest. For example,
3 * the first write will tell us the Guest's memory layout and entry 3 * the first write will tell us the Guest's memory layout and entry
4 * point. A read will run the Guest until something happens, such as 4 * point. A read will run the Guest until something happens, such as
5 * a signal or the Guest doing a NOTIFY out to the Launcher. There is 5 * a signal or the Guest accessing a device.
6 * also a way for the Launcher to attach eventfds to particular NOTIFY
7 * values instead of returning from the read() call.
8:*/ 6:*/
9#include <linux/uaccess.h> 7#include <linux/uaccess.h>
10#include <linux/miscdevice.h> 8#include <linux/miscdevice.h>
11#include <linux/fs.h> 9#include <linux/fs.h>
12#include <linux/sched.h> 10#include <linux/sched.h>
13#include <linux/eventfd.h>
14#include <linux/file.h> 11#include <linux/file.h>
15#include <linux/slab.h> 12#include <linux/slab.h>
16#include <linux/export.h> 13#include <linux/export.h>
17#include "lg.h" 14#include "lg.h"
18 15
19/*L:056 16/*L:052
20 * Before we move on, let's jump ahead and look at what the kernel does when 17 The Launcher can get the registers, and also set some of them.
21 * it needs to look up the eventfds. That will complete our picture of how we 18*/
22 * use RCU. 19static int getreg_setup(struct lg_cpu *cpu, const unsigned long __user *input)
23 *
24 * The notification value is in cpu->pending_notify: we return true if it went
25 * to an eventfd.
26 */
27bool send_notify_to_eventfd(struct lg_cpu *cpu)
28{
29 unsigned int i;
30 struct lg_eventfd_map *map;
31
32 /*
33 * This "rcu_read_lock()" helps track when someone is still looking at
34 * the (RCU-using) eventfds array. It's not actually a lock at all;
35 * indeed it's a noop in many configurations. (You didn't expect me to
36 * explain all the RCU secrets here, did you?)
37 */
38 rcu_read_lock();
39 /*
40 * rcu_dereference is the counter-side of rcu_assign_pointer(); it
41 * makes sure we don't access the memory pointed to by
42 * cpu->lg->eventfds before cpu->lg->eventfds is set. Sounds crazy,
43 * but Alpha allows this! Paul McKenney points out that a really
44 * aggressive compiler could have the same effect:
45 * http://lists.ozlabs.org/pipermail/lguest/2009-July/001560.html
46 *
47 * So play safe, use rcu_dereference to get the rcu-protected pointer:
48 */
49 map = rcu_dereference(cpu->lg->eventfds);
50 /*
51 * Simple array search: even if they add an eventfd while we do this,
52 * we'll continue to use the old array and just won't see the new one.
53 */
54 for (i = 0; i < map->num; i++) {
55 if (map->map[i].addr == cpu->pending_notify) {
56 eventfd_signal(map->map[i].event, 1);
57 cpu->pending_notify = 0;
58 break;
59 }
60 }
61 /* We're done with the rcu-protected variable cpu->lg->eventfds. */
62 rcu_read_unlock();
63
64 /* If we cleared the notification, it's because we found a match. */
65 return cpu->pending_notify == 0;
66}
67
68/*L:055
69 * One of the more tricksy tricks in the Linux Kernel is a technique called
70 * Read Copy Update. Since one point of lguest is to teach lguest journeyers
71 * about kernel coding, I use it here. (In case you're curious, other purposes
72 * include learning about virtualization and instilling a deep appreciation for
73 * simplicity and puppies).
74 *
75 * We keep a simple array which maps LHCALL_NOTIFY values to eventfds, but we
76 * add new eventfds without ever blocking readers from accessing the array.
77 * The current Launcher only does this during boot, so that never happens. But
78 * Read Copy Update is cool, and adding a lock risks damaging even more puppies
79 * than this code does.
80 *
81 * We allocate a brand new one-larger array, copy the old one and add our new
82 * element. Then we make the lg eventfd pointer point to the new array.
83 * That's the easy part: now we need to free the old one, but we need to make
84 * sure no slow CPU somewhere is still looking at it. That's what
 85 * synchronize_rcu does for us: it waits until every CPU has indicated that
 86 * it has moved on, so we know no one is still using the old one.
87 *
88 * If that's unclear, see http://en.wikipedia.org/wiki/Read-copy-update.
89 */
90static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
91{ 20{
92 struct lg_eventfd_map *new, *old = lg->eventfds; 21 unsigned long which;
93
94 /*
95 * We don't allow notifications on value 0 anyway (pending_notify of
96 * 0 means "nothing pending").
97 */
98 if (!addr)
99 return -EINVAL;
100
101 /*
102 * Replace the old array with the new one, carefully: others can
103 * be accessing it at the same time.
104 */
105 new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1),
106 GFP_KERNEL);
107 if (!new)
108 return -ENOMEM;
109 22
110 /* First make identical copy. */ 23 /* We re-use the ptrace structure to specify which register to read. */
111 memcpy(new->map, old->map, sizeof(old->map[0]) * old->num); 24 if (get_user(which, input) != 0)
112 new->num = old->num; 25 return -EFAULT;
113
114 /* Now append new entry. */
115 new->map[new->num].addr = addr;
116 new->map[new->num].event = eventfd_ctx_fdget(fd);
117 if (IS_ERR(new->map[new->num].event)) {
118 int err = PTR_ERR(new->map[new->num].event);
119 kfree(new);
120 return err;
121 }
122 new->num++;
123 26
124 /* 27 /*
125 * Now put new one in place: rcu_assign_pointer() is a fancy way of 28 * We set up the cpu register pointer, and their next read will
126 * doing "lg->eventfds = new", but it uses memory barriers to make 29 * actually get the value (instead of running the guest).
127 * absolutely sure that the contents of "new" written above is nailed
128 * down before we actually do the assignment.
129 * 30 *
130 * We have to think about these kinds of things when we're operating on 31 * The last argument 'true' says we can access any register.
131 * live data without locks.
132 */ 32 */
133 rcu_assign_pointer(lg->eventfds, new); 33 cpu->reg_read = lguest_arch_regptr(cpu, which, true);
34 if (!cpu->reg_read)
35 return -ENOENT;
134 36
135 /* 37 /* And because this is a write() call, we return the length used. */
136 * We're not in a big hurry. Wait until no one's looking at old 38 return sizeof(unsigned long) * 2;
137 * version, then free it.
138 */
139 synchronize_rcu();
140 kfree(old);
141
142 return 0;
143} 39}
144 40
145/*L:052 41static int setreg(struct lg_cpu *cpu, const unsigned long __user *input)
146 * Receiving notifications from the Guest is usually done by attaching a
147 * particular LHCALL_NOTIFY value to an event filedescriptor. The eventfd will
148 * become readable when the Guest does an LHCALL_NOTIFY with that value.
149 *
150 * This is really convenient for processing each virtqueue in a separate
151 * thread.
152 */
153static int attach_eventfd(struct lguest *lg, const unsigned long __user *input)
154{ 42{
155 unsigned long addr, fd; 43 unsigned long which, value, *reg;
156 int err;
157 44
 158 if (get_user(addr, input) != 0) 45 /* We re-use the ptrace structure to specify which register to set. */
46 if (get_user(which, input) != 0)
159 return -EFAULT; 47 return -EFAULT;
160 input++; 48 input++;
161 if (get_user(fd, input) != 0) 49 if (get_user(value, input) != 0)
162 return -EFAULT; 50 return -EFAULT;
163 51
164 /* 52 /* The last argument 'false' means we can't access all registers. */
165 * Just make sure two callers don't add eventfds at once. We really 53 reg = lguest_arch_regptr(cpu, which, false);
166 * only need to lock against callers adding to the same Guest, so using 54 if (!reg)
167 * the Big Lguest Lock is overkill. But this is setup, not a fast path. 55 return -ENOENT;
168 */
169 mutex_lock(&lguest_lock);
170 err = add_eventfd(lg, addr, fd);
171 mutex_unlock(&lguest_lock);
172 56
173 return err; 57 *reg = value;
58
59 /* And because this is a write() call, we return the length used. */
60 return sizeof(unsigned long) * 3;
174} 61}
175 62
176/*L:050 63/*L:050
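Both helpers follow the /dev/lguest write() convention: the first unsigned long selects the LHREQ_* operation and the remaining longs are its arguments, which is why getreg_setup() reports two longs consumed and setreg() three. A hypothetical Launcher-side wrapper built on that convention (get_guest_reg() and its error handling are illustrative, not from this patch; LHREQ_GETREG comes from linux/lguest_launcher.h):

    #include <err.h>
    #include <stddef.h>
    #include <unistd.h>
    #include <linux/lguest_launcher.h>

    static unsigned long get_guest_reg(int lguest_fd, size_t reg_off)
    {
            unsigned long args[2] = { LHREQ_GETREG, reg_off };
            unsigned long val;

            if (write(lguest_fd, args, sizeof(args)) != sizeof(args))
                    err(1, "LHREQ_GETREG");
            /* The next read() returns the value instead of running the Guest. */
            if (read(lguest_fd, &val, sizeof(val)) != sizeof(val))
                    err(1, "reading register");
            return val;
    }

For example, get_guest_reg(fd, offsetof(struct pt_regs, ip)) would fetch the Guest's eip, using the same pt_regs offsets the Host decodes in lguest_arch_regptr() below.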
@@ -194,6 +81,23 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
194 return 0; 81 return 0;
195} 82}
196 83
84/*L:053
85 * Deliver a trap: this is used by the Launcher if it can't emulate
86 * an instruction.
87 */
88static int trap(struct lg_cpu *cpu, const unsigned long __user *input)
89{
90 unsigned long trapnum;
91
92 if (get_user(trapnum, input) != 0)
93 return -EFAULT;
94
95 if (!deliver_trap(cpu, trapnum))
96 return -EINVAL;
97
98 return 0;
99}
100
197/*L:040 101/*L:040
198 * Once our Guest is initialized, the Launcher makes it run by reading 102 * Once our Guest is initialized, the Launcher makes it run by reading
199 * from /dev/lguest. 103 * from /dev/lguest.
@@ -237,8 +141,8 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
237 * If we returned from read() last time because the Guest sent I/O, 141 * If we returned from read() last time because the Guest sent I/O,
238 * clear the flag. 142 * clear the flag.
239 */ 143 */
240 if (cpu->pending_notify) 144 if (cpu->pending.trap)
241 cpu->pending_notify = 0; 145 cpu->pending.trap = 0;
242 146
243 /* Run the Guest until something interesting happens. */ 147 /* Run the Guest until something interesting happens. */
244 return run_guest(cpu, (unsigned long __user *)user); 148 return run_guest(cpu, (unsigned long __user *)user);
@@ -319,7 +223,7 @@ static int initialize(struct file *file, const unsigned long __user *input)
319 /* "struct lguest" contains all we (the Host) know about a Guest. */ 223 /* "struct lguest" contains all we (the Host) know about a Guest. */
320 struct lguest *lg; 224 struct lguest *lg;
321 int err; 225 int err;
322 unsigned long args[3]; 226 unsigned long args[4];
323 227
324 /* 228 /*
325 * We grab the Big Lguest lock, which protects against multiple 229 * We grab the Big Lguest lock, which protects against multiple
@@ -343,21 +247,15 @@ static int initialize(struct file *file, const unsigned long __user *input)
343 goto unlock; 247 goto unlock;
344 } 248 }
345 249
346 lg->eventfds = kmalloc(sizeof(*lg->eventfds), GFP_KERNEL);
347 if (!lg->eventfds) {
348 err = -ENOMEM;
349 goto free_lg;
350 }
351 lg->eventfds->num = 0;
352
353 /* Populate the easy fields of our "struct lguest" */ 250 /* Populate the easy fields of our "struct lguest" */
354 lg->mem_base = (void __user *)args[0]; 251 lg->mem_base = (void __user *)args[0];
355 lg->pfn_limit = args[1]; 252 lg->pfn_limit = args[1];
253 lg->device_limit = args[3];
356 254
357 /* This is the first cpu (cpu 0) and it will start booting at args[2] */ 255 /* This is the first cpu (cpu 0) and it will start booting at args[2] */
358 err = lg_cpu_start(&lg->cpus[0], 0, args[2]); 256 err = lg_cpu_start(&lg->cpus[0], 0, args[2]);
359 if (err) 257 if (err)
360 goto free_eventfds; 258 goto free_lg;
361 259
362 /* 260 /*
363 * Initialize the Guest's shadow page tables. This allocates 261 * Initialize the Guest's shadow page tables. This allocates
@@ -378,8 +276,6 @@ static int initialize(struct file *file, const unsigned long __user *input)
378free_regs: 276free_regs:
379 /* FIXME: This should be in free_vcpu */ 277 /* FIXME: This should be in free_vcpu */
380 free_page(lg->cpus[0].regs_page); 278 free_page(lg->cpus[0].regs_page);
381free_eventfds:
382 kfree(lg->eventfds);
383free_lg: 279free_lg:
384 kfree(lg); 280 kfree(lg);
385unlock: 281unlock:
@@ -432,8 +328,12 @@ static ssize_t write(struct file *file, const char __user *in,
432 return initialize(file, input); 328 return initialize(file, input);
433 case LHREQ_IRQ: 329 case LHREQ_IRQ:
434 return user_send_irq(cpu, input); 330 return user_send_irq(cpu, input);
435 case LHREQ_EVENTFD: 331 case LHREQ_GETREG:
436 return attach_eventfd(lg, input); 332 return getreg_setup(cpu, input);
333 case LHREQ_SETREG:
334 return setreg(cpu, input);
335 case LHREQ_TRAP:
336 return trap(cpu, input);
437 default: 337 default:
438 return -EINVAL; 338 return -EINVAL;
439 } 339 }
@@ -478,11 +378,6 @@ static int close(struct inode *inode, struct file *file)
478 mmput(lg->cpus[i].mm); 378 mmput(lg->cpus[i].mm);
479 } 379 }
480 380
481 /* Release any eventfds they registered. */
482 for (i = 0; i < lg->eventfds->num; i++)
483 eventfd_ctx_put(lg->eventfds->map[i].event);
484 kfree(lg->eventfds);
485
486 /* 381 /*
487 * If lg->dead doesn't contain an error code it will be NULL or a 382 * If lg->dead doesn't contain an error code it will be NULL or a
488 * kmalloc()ed string, either of which is ok to hand to kfree(). 383 * kmalloc()ed string, either of which is ok to hand to kfree().
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index e8b55c3a6170..e3abebc912c0 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -250,6 +250,16 @@ static void release_pte(pte_t pte)
250} 250}
251/*:*/ 251/*:*/
252 252
253static bool gpte_in_iomem(struct lg_cpu *cpu, pte_t gpte)
254{
255 /* We don't handle large pages. */
256 if (pte_flags(gpte) & _PAGE_PSE)
257 return false;
258
259 return (pte_pfn(gpte) >= cpu->lg->pfn_limit
260 && pte_pfn(gpte) < cpu->lg->device_limit);
261}
262
253static bool check_gpte(struct lg_cpu *cpu, pte_t gpte) 263static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
254{ 264{
255 if ((pte_flags(gpte) & _PAGE_PSE) || 265 if ((pte_flags(gpte) & _PAGE_PSE) ||
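gpte_in_iomem() splits guest-physical pfns into three ranges. A worked example with assumed numbers (not from the patch):

    /*
     * Say the Launcher passed pfn_limit = 0x8000 (128MB of guest RAM)
     * and device_limit = 0x8010:
     *
     *   pfn 0x0000 - 0x7fff  normal RAM, demand-paged as before
     *   pfn 0x8000 - 0x800f  device window: gpte_in_iomem() is true and
     *                        the fault is bounced to the Launcher
     *   pfn 0x8010 and up    fails check_gpte() as a bad page table entry
     */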
@@ -374,8 +384,14 @@ static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr, bool allocate,
374 * 384 *
375 * If we fixed up the fault (ie. we mapped the address), this routine returns 385 * If we fixed up the fault (ie. we mapped the address), this routine returns
376 * true. Otherwise, it was a real fault and we need to tell the Guest. 386 * true. Otherwise, it was a real fault and we need to tell the Guest.
387 *
388 * There's a corner case: they're trying to access memory between
389 * pfn_limit and device_limit, which is I/O memory. In this case, we
 390 * return false and set @iomem to the physical address, so the
391 * Launcher can handle the instruction manually.
377 */ 392 */
378bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) 393bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode,
394 unsigned long *iomem)
379{ 395{
380 unsigned long gpte_ptr; 396 unsigned long gpte_ptr;
381 pte_t gpte; 397 pte_t gpte;
@@ -383,6 +399,8 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
383 pmd_t gpmd; 399 pmd_t gpmd;
384 pgd_t gpgd; 400 pgd_t gpgd;
385 401
402 *iomem = 0;
403
386 /* We never demand page the Switcher, so trying is a mistake. */ 404 /* We never demand page the Switcher, so trying is a mistake. */
387 if (vaddr >= switcher_addr) 405 if (vaddr >= switcher_addr)
388 return false; 406 return false;
@@ -459,6 +477,12 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
459 if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER)) 477 if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
460 return false; 478 return false;
461 479
480 /* If they're accessing io memory, we expect a fault. */
481 if (gpte_in_iomem(cpu, gpte)) {
482 *iomem = (pte_pfn(gpte) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
483 return false;
484 }
485
462 /* 486 /*
463 * Check that the Guest PTE flags are OK, and the page number is below 487 * Check that the Guest PTE flags are OK, and the page number is below
464 * the pfn_limit (ie. not mapping the Launcher binary). 488 * the pfn_limit (ie. not mapping the Launcher binary).
@@ -553,7 +577,9 @@ static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
553 */ 577 */
554void pin_page(struct lg_cpu *cpu, unsigned long vaddr) 578void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
555{ 579{
556 if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2)) 580 unsigned long iomem;
581
582 if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2, &iomem))
557 kill_guest(cpu, "bad stack page %#lx", vaddr); 583 kill_guest(cpu, "bad stack page %#lx", vaddr);
558} 584}
559/*:*/ 585/*:*/
@@ -647,7 +673,7 @@ void guest_pagetable_flush_user(struct lg_cpu *cpu)
647/*:*/ 673/*:*/
648 674
649/* We walk down the guest page tables to get a guest-physical address */ 675/* We walk down the guest page tables to get a guest-physical address */
650unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr) 676bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr)
651{ 677{
652 pgd_t gpgd; 678 pgd_t gpgd;
653 pte_t gpte; 679 pte_t gpte;
@@ -656,31 +682,47 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
656#endif 682#endif
657 683
658 /* Still not set up? Just map 1:1. */ 684 /* Still not set up? Just map 1:1. */
659 if (unlikely(cpu->linear_pages)) 685 if (unlikely(cpu->linear_pages)) {
660 return vaddr; 686 *paddr = vaddr;
687 return true;
688 }
661 689
662 /* First step: get the top-level Guest page table entry. */ 690 /* First step: get the top-level Guest page table entry. */
663 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); 691 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
664 /* Toplevel not present? We can't map it in. */ 692 /* Toplevel not present? We can't map it in. */
665 if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) { 693 if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
666 kill_guest(cpu, "Bad address %#lx", vaddr); 694 goto fail;
667 return -1UL;
668 }
669 695
670#ifdef CONFIG_X86_PAE 696#ifdef CONFIG_X86_PAE
671 gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); 697 gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
672 if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) { 698 if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
673 kill_guest(cpu, "Bad address %#lx", vaddr); 699 goto fail;
674 return -1UL;
675 }
676 gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t); 700 gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
677#else 701#else
678 gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t); 702 gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
679#endif 703#endif
680 if (!(pte_flags(gpte) & _PAGE_PRESENT)) 704 if (!(pte_flags(gpte) & _PAGE_PRESENT))
681 kill_guest(cpu, "Bad address %#lx", vaddr); 705 goto fail;
706
707 *paddr = pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
708 return true;
709
710fail:
711 *paddr = -1UL;
712 return false;
713}
682 714
683 return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK); 715/*
716 * This is the version we normally use: kills the Guest if it uses a
717 * bad address
718 */
719unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
720{
721 unsigned long paddr;
722
723 if (!__guest_pa(cpu, vaddr, &paddr))
724 kill_guest(cpu, "Bad address %#lx", vaddr);
725 return paddr;
684} 726}
685 727
686/* 728/*
@@ -912,7 +954,8 @@ static void __guest_set_pte(struct lg_cpu *cpu, int idx,
912 * now. This shaves 10% off a copy-on-write 954 * now. This shaves 10% off a copy-on-write
913 * micro-benchmark. 955 * micro-benchmark.
914 */ 956 */
915 if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) { 957 if ((pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED))
958 && !gpte_in_iomem(cpu, gpte)) {
916 if (!check_gpte(cpu, gpte)) 959 if (!check_gpte(cpu, gpte))
917 return; 960 return;
918 set_pte(spte, 961 set_pte(spte,
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 6adfd7ba4c97..30f2aef69d78 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -182,6 +182,52 @@ static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages)
182} 182}
183/*:*/ 183/*:*/
184 184
185unsigned long *lguest_arch_regptr(struct lg_cpu *cpu, size_t reg_off, bool any)
186{
187 switch (reg_off) {
188 case offsetof(struct pt_regs, bx):
189 return &cpu->regs->ebx;
190 case offsetof(struct pt_regs, cx):
191 return &cpu->regs->ecx;
192 case offsetof(struct pt_regs, dx):
193 return &cpu->regs->edx;
194 case offsetof(struct pt_regs, si):
195 return &cpu->regs->esi;
196 case offsetof(struct pt_regs, di):
197 return &cpu->regs->edi;
198 case offsetof(struct pt_regs, bp):
199 return &cpu->regs->ebp;
200 case offsetof(struct pt_regs, ax):
201 return &cpu->regs->eax;
202 case offsetof(struct pt_regs, ip):
203 return &cpu->regs->eip;
204 case offsetof(struct pt_regs, sp):
205 return &cpu->regs->esp;
206 }
207
208 /* Launcher can read these, but we don't allow any setting. */
209 if (any) {
210 switch (reg_off) {
211 case offsetof(struct pt_regs, ds):
212 return &cpu->regs->ds;
213 case offsetof(struct pt_regs, es):
214 return &cpu->regs->es;
215 case offsetof(struct pt_regs, fs):
216 return &cpu->regs->fs;
217 case offsetof(struct pt_regs, gs):
218 return &cpu->regs->gs;
219 case offsetof(struct pt_regs, cs):
220 return &cpu->regs->cs;
221 case offsetof(struct pt_regs, flags):
222 return &cpu->regs->eflags;
223 case offsetof(struct pt_regs, ss):
224 return &cpu->regs->ss;
225 }
226 }
227
228 return NULL;
229}
230
185/*M:002 231/*M:002
186 * There are hooks in the scheduler which we can register to tell when we 232 * There are hooks in the scheduler which we can register to tell when we
187 * get kicked off the CPU (preempt_notifier_register()). This would allow us 233 * get kicked off the CPU (preempt_notifier_register()). This would allow us
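Note that lguest_arch_regptr() keys on pt_regs offsets, so the Launcher and Host share the existing ptrace register layout rather than inventing a new numbering. An illustrative call (not in the patch); passing 'false' mirrors setreg(), which keeps segment registers and eflags out of reach:

    /* Resolve the Guest's eip slot; NULL if the offset isn't writable. */
    unsigned long *ip = lguest_arch_regptr(cpu, offsetof(struct pt_regs, ip), false);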
@@ -269,110 +315,73 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
269 * usually attached to a PC. 315 * usually attached to a PC.
270 * 316 *
271 * When the Guest uses one of these instructions, we get a trap (General 317 * When the Guest uses one of these instructions, we get a trap (General
272 * Protection Fault) and come here. We see if it's one of those troublesome 318 * Protection Fault) and come here. We queue this to be sent out to the
273 * instructions and skip over it. We return true if we did. 319 * Launcher to handle.
274 */ 320 */
275static int emulate_insn(struct lg_cpu *cpu)
276{
277 u8 insn;
278 unsigned int insnlen = 0, in = 0, small_operand = 0;
279 /*
280 * The eip contains the *virtual* address of the Guest's instruction:
281 * walk the Guest's page tables to find the "physical" address.
282 */
283 unsigned long physaddr = guest_pa(cpu, cpu->regs->eip);
284
285 /*
286 * This must be the Guest kernel trying to do something, not userspace!
287 * The bottom two bits of the CS segment register are the privilege
288 * level.
289 */
290 if ((cpu->regs->cs & 3) != GUEST_PL)
291 return 0;
292
293 /* Decoding x86 instructions is icky. */
294 insn = lgread(cpu, physaddr, u8);
295 321
296 /* 322/*
297 * Around 2.6.33, the kernel started using an emulation for the 323 * The eip contains the *virtual* address of the Guest's instruction:
298 * cmpxchg8b instruction in early boot on many configurations. This 324 * we copy the instruction here so the Launcher doesn't have to walk
299 * code isn't paravirtualized, and it tries to disable interrupts. 325 * the page tables to decode it. We handle the case (eg. in a kernel
300 * Ignore it, which will Mostly Work. 326 * module) where the instruction is over two pages, and the pages are
301 */ 327 * virtually but not physically contiguous.
302 if (insn == 0xfa) { 328 *
303 /* "cli", or Clear Interrupt Enable instruction. Skip it. */ 329 * The longest possible x86 instruction is 15 bytes, but we don't handle
304 cpu->regs->eip++; 330 * anything that strange.
305 return 1; 331 */
332static void copy_from_guest(struct lg_cpu *cpu,
333 void *dst, unsigned long vaddr, size_t len)
334{
335 size_t to_page_end = PAGE_SIZE - (vaddr % PAGE_SIZE);
336 unsigned long paddr;
337
338 BUG_ON(len > PAGE_SIZE);
339
340 /* If it goes over a page, copy in two parts. */
341 if (len > to_page_end) {
342 /* But make sure the next page is mapped! */
343 if (__guest_pa(cpu, vaddr + to_page_end, &paddr))
344 copy_from_guest(cpu, dst + to_page_end,
345 vaddr + to_page_end,
346 len - to_page_end);
347 else
348 /* Otherwise fill with zeroes. */
349 memset(dst + to_page_end, 0, len - to_page_end);
350 len = to_page_end;
306 } 351 }
307 352
308 /* 353 /* This will kill the guest if it isn't mapped, but that
309 * 0x66 is an "operand prefix". It means a 16, not 32 bit in/out. 354 * shouldn't happen. */
310 */ 355 __lgread(cpu, dst, guest_pa(cpu, vaddr), len);
311 if (insn == 0x66) { 356}
312 small_operand = 1;
313 /* The instruction is 1 byte so far, read the next byte. */
314 insnlen = 1;
315 insn = lgread(cpu, physaddr + insnlen, u8);
316 }
317 357
318 /*
319 * We can ignore the lower bit for the moment and decode the 4 opcodes
320 * we need to emulate.
321 */
322 switch (insn & 0xFE) {
323 case 0xE4: /* in <next byte>,%al */
324 insnlen += 2;
325 in = 1;
326 break;
327 case 0xEC: /* in (%dx),%al */
328 insnlen += 1;
329 in = 1;
330 break;
331 case 0xE6: /* out %al,<next byte> */
332 insnlen += 2;
333 break;
334 case 0xEE: /* out %al,(%dx) */
335 insnlen += 1;
336 break;
337 default:
338 /* OK, we don't know what this is, can't emulate. */
339 return 0;
340 }
341 358
342 /* 359static void setup_emulate_insn(struct lg_cpu *cpu)
343 * If it was an "IN" instruction, they expect the result to be read 360{
344 * into %eax, so we change %eax. We always return all-ones, which 361 cpu->pending.trap = 13;
345 * traditionally means "there's nothing there". 362 copy_from_guest(cpu, cpu->pending.insn, cpu->regs->eip,
346 */ 363 sizeof(cpu->pending.insn));
347 if (in) { 364}
 348 /* Lower bit means it's a 32/16 bit access */ 365
349 if (insn & 0x1) { 366static void setup_iomem_insn(struct lg_cpu *cpu, unsigned long iomem_addr)
350 if (small_operand) 367{
351 cpu->regs->eax |= 0xFFFF; 368 cpu->pending.trap = 14;
352 else 369 cpu->pending.addr = iomem_addr;
353 cpu->regs->eax = 0xFFFFFFFF; 370 copy_from_guest(cpu, cpu->pending.insn, cpu->regs->eip,
354 } else 371 sizeof(cpu->pending.insn));
355 cpu->regs->eax |= 0xFF;
356 }
357 /* Finally, we've "done" the instruction, so move past it. */
358 cpu->regs->eip += insnlen;
359 /* Success! */
360 return 1;
361} 372}
362 373
363/*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */ 374/*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */
364void lguest_arch_handle_trap(struct lg_cpu *cpu) 375void lguest_arch_handle_trap(struct lg_cpu *cpu)
365{ 376{
377 unsigned long iomem_addr;
378
366 switch (cpu->regs->trapnum) { 379 switch (cpu->regs->trapnum) {
367 case 13: /* We've intercepted a General Protection Fault. */ 380 case 13: /* We've intercepted a General Protection Fault. */
368 /* 381 /* Hand to Launcher to emulate those pesky IN and OUT insns */
369 * Check if this was one of those annoying IN or OUT
370 * instructions which we need to emulate. If so, we just go
371 * back into the Guest after we've done it.
372 */
373 if (cpu->regs->errcode == 0) { 382 if (cpu->regs->errcode == 0) {
374 if (emulate_insn(cpu)) 383 setup_emulate_insn(cpu);
375 return; 384 return;
376 } 385 }
377 break; 386 break;
378 case 14: /* We've intercepted a Page Fault. */ 387 case 14: /* We've intercepted a Page Fault. */
@@ -387,9 +396,16 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
387 * whether kernel or userspace code. 396 * whether kernel or userspace code.
388 */ 397 */
389 if (demand_page(cpu, cpu->arch.last_pagefault, 398 if (demand_page(cpu, cpu->arch.last_pagefault,
390 cpu->regs->errcode)) 399 cpu->regs->errcode, &iomem_addr))
391 return; 400 return;
392 401
402 /* Was this an access to memory mapped IO? */
403 if (iomem_addr) {
404 /* Tell Launcher, let it handle it. */
405 setup_iomem_insn(cpu, iomem_addr);
406 return;
407 }
408
393 /* 409 /*
394 * OK, it's really not there (or not OK): the Guest needs to 410 * OK, it's really not there (or not OK): the Guest needs to
395 * know. We write out the cr2 value so it knows where the 411 * know. We write out the cr2 value so it knows where the
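With emulate_insn() gone from the Host, the division of labour is clean: trap 13 (port I/O) and trap 14 faults inside the device window both reach the Launcher as a struct lguest_pending. A hypothetical Launcher-side dispatcher, where handle_trap(), emulate_io_insn(), emulate_mmio() and send_trap() are illustrative names rather than code from this patch:

    static void handle_trap(int lguest_fd, struct lguest_pending *p)
    {
            switch (p->trap) {
            case 13:        /* GP fault: emulate the IN/OUT bytes in p->insn */
                    emulate_io_insn(lguest_fd, p->insn);
                    break;
            case 14:        /* page fault in the device window */
                    emulate_mmio(lguest_fd, p->addr, p->insn);
                    break;
            default:        /* anything else is reflected back via LHREQ_TRAP */
                    send_trap(lguest_fd, p->trap);
            }
    }

After emulating, the Launcher would advance the Guest's eip via LHREQ_SETREG before the next read().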
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index c39644478aa4..63e05e32b462 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -178,7 +178,7 @@ config MD_FAULTY
178source "drivers/md/bcache/Kconfig" 178source "drivers/md/bcache/Kconfig"
179 179
180config BLK_DEV_DM_BUILTIN 180config BLK_DEV_DM_BUILTIN
181 boolean 181 bool
182 182
183config BLK_DEV_DM 183config BLK_DEV_DM
184 tristate "Device mapper support" 184 tristate "Device mapper support"
@@ -197,7 +197,7 @@ config BLK_DEV_DM
197 If unsure, say N. 197 If unsure, say N.
198 198
199config DM_DEBUG 199config DM_DEBUG
200 boolean "Device mapper debugging support" 200 bool "Device mapper debugging support"
201 depends on BLK_DEV_DM 201 depends on BLK_DEV_DM
202 ---help--- 202 ---help---
203 Enable this for messages that may help debug device-mapper problems. 203 Enable this for messages that may help debug device-mapper problems.
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 08981be7baa1..713a96237a80 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -18,9 +18,11 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/crypto.h> 19#include <linux/crypto.h>
20#include <linux/workqueue.h> 20#include <linux/workqueue.h>
21#include <linux/kthread.h>
21#include <linux/backing-dev.h> 22#include <linux/backing-dev.h>
22#include <linux/atomic.h> 23#include <linux/atomic.h>
23#include <linux/scatterlist.h> 24#include <linux/scatterlist.h>
25#include <linux/rbtree.h>
24#include <asm/page.h> 26#include <asm/page.h>
25#include <asm/unaligned.h> 27#include <asm/unaligned.h>
26#include <crypto/hash.h> 28#include <crypto/hash.h>
@@ -58,7 +60,8 @@ struct dm_crypt_io {
58 atomic_t io_pending; 60 atomic_t io_pending;
59 int error; 61 int error;
60 sector_t sector; 62 sector_t sector;
61 struct dm_crypt_io *base_io; 63
64 struct rb_node rb_node;
62} CRYPTO_MINALIGN_ATTR; 65} CRYPTO_MINALIGN_ATTR;
63 66
64struct dm_crypt_request { 67struct dm_crypt_request {
@@ -108,7 +111,8 @@ struct iv_tcw_private {
108 * Crypt: maps a linear range of a block device 111 * Crypt: maps a linear range of a block device
109 * and encrypts / decrypts at the same time. 112 * and encrypts / decrypts at the same time.
110 */ 113 */
111enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; 114enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
115 DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
112 116
113/* 117/*
114 * The fields in here must be read only after initialization. 118 * The fields in here must be read only after initialization.
@@ -121,14 +125,18 @@ struct crypt_config {
121 * pool for per bio private data, crypto requests and 125 * pool for per bio private data, crypto requests and
 122 * encryption requests/buffer pages 126 * encryption requests/buffer pages
123 */ 127 */
124 mempool_t *io_pool;
125 mempool_t *req_pool; 128 mempool_t *req_pool;
126 mempool_t *page_pool; 129 mempool_t *page_pool;
127 struct bio_set *bs; 130 struct bio_set *bs;
131 struct mutex bio_alloc_lock;
128 132
129 struct workqueue_struct *io_queue; 133 struct workqueue_struct *io_queue;
130 struct workqueue_struct *crypt_queue; 134 struct workqueue_struct *crypt_queue;
131 135
136 struct task_struct *write_thread;
137 wait_queue_head_t write_thread_wait;
138 struct rb_root write_tree;
139
132 char *cipher; 140 char *cipher;
133 char *cipher_string; 141 char *cipher_string;
134 142
@@ -172,9 +180,6 @@ struct crypt_config {
172}; 180};
173 181
174#define MIN_IOS 16 182#define MIN_IOS 16
175#define MIN_POOL_PAGES 32
176
177static struct kmem_cache *_crypt_io_pool;
178 183
179static void clone_init(struct dm_crypt_io *, struct bio *); 184static void clone_init(struct dm_crypt_io *, struct bio *);
180static void kcryptd_queue_crypt(struct dm_crypt_io *io); 185static void kcryptd_queue_crypt(struct dm_crypt_io *io);
@@ -946,57 +951,70 @@ static int crypt_convert(struct crypt_config *cc,
946 return 0; 951 return 0;
947} 952}
948 953
954static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
955
949/* 956/*
950 * Generate a new unfragmented bio with the given size 957 * Generate a new unfragmented bio with the given size
951 * This should never violate the device limitations 958 * This should never violate the device limitations
952 * May return a smaller bio when running out of pages, indicated by 959 *
953 * *out_of_pages set to 1. 960 * This function may be called concurrently. If we allocate from the mempool
961 * concurrently, there is a possibility of deadlock. For example, with a
962 * mempool of 256 pages, if two processes, each wanting 256 pages, allocate
963 * from the mempool concurrently, they may deadlock when both processes
964 * have allocated 128 pages and the mempool is exhausted.
965 *
966 * In order to avoid this scenario we allocate the pages under a mutex.
967 *
968 * In order to not degrade performance with excessive locking, we try
969 * non-blocking allocations without a mutex first, but on failure we fall back
970 * to blocking allocations with a mutex.
954 */ 971 */
955static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size, 972static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
956 unsigned *out_of_pages)
957{ 973{
958 struct crypt_config *cc = io->cc; 974 struct crypt_config *cc = io->cc;
959 struct bio *clone; 975 struct bio *clone;
960 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 976 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
961 gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; 977 gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
962 unsigned i, len; 978 unsigned i, len, remaining_size;
963 struct page *page; 979 struct page *page;
980 struct bio_vec *bvec;
981
982retry:
983 if (unlikely(gfp_mask & __GFP_WAIT))
984 mutex_lock(&cc->bio_alloc_lock);
964 985
965 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs); 986 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
966 if (!clone) 987 if (!clone)
967 return NULL; 988 goto return_clone;
968 989
969 clone_init(io, clone); 990 clone_init(io, clone);
970 *out_of_pages = 0; 991
992 remaining_size = size;
971 993
972 for (i = 0; i < nr_iovecs; i++) { 994 for (i = 0; i < nr_iovecs; i++) {
973 page = mempool_alloc(cc->page_pool, gfp_mask); 995 page = mempool_alloc(cc->page_pool, gfp_mask);
974 if (!page) { 996 if (!page) {
975 *out_of_pages = 1; 997 crypt_free_buffer_pages(cc, clone);
976 break; 998 bio_put(clone);
999 gfp_mask |= __GFP_WAIT;
1000 goto retry;
977 } 1001 }
978 1002
979 /* 1003 len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
980 * If additional pages cannot be allocated without waiting,
981 * return a partially-allocated bio. The caller will then try
982 * to allocate more bios while submitting this partial bio.
983 */
984 gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
985 1004
986 len = (size > PAGE_SIZE) ? PAGE_SIZE : size; 1005 bvec = &clone->bi_io_vec[clone->bi_vcnt++];
1006 bvec->bv_page = page;
1007 bvec->bv_len = len;
1008 bvec->bv_offset = 0;
987 1009
988 if (!bio_add_page(clone, page, len, 0)) { 1010 clone->bi_iter.bi_size += len;
989 mempool_free(page, cc->page_pool);
990 break;
991 }
992 1011
993 size -= len; 1012 remaining_size -= len;
994 } 1013 }
995 1014
996 if (!clone->bi_iter.bi_size) { 1015return_clone:
997 bio_put(clone); 1016 if (unlikely(gfp_mask & __GFP_WAIT))
998 return NULL; 1017 mutex_unlock(&cc->bio_alloc_lock);
999 }
1000 1018
1001 return clone; 1019 return clone;
1002} 1020}
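
The comment above states the rule; a small userspace sketch may make it concrete. It fakes the page pool with a counting semaphore and mirrors the retry logic: allocate every page without sleeping, and on the first failure give everything back and redo the run with blocking waits, serialized so at most one caller can sleep on the pool. All names and sizes are illustrative, not part of the patch.

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdbool.h>
    #include <stdlib.h>

    static sem_t pool;   /* counts free pages; sem_init(&pool, 0, 256) once at startup */
    static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;

    static void *pool_alloc(bool may_block)
    {
        if (may_block)
            sem_wait(&pool);            /* __GFP_WAIT analogue: sleep for a free page */
        else if (sem_trywait(&pool) != 0)
            return NULL;                /* GFP_NOWAIT analogue: fail, never sleep */
        return malloc(4096);            /* stand-in for a page */
    }

    static void pool_free(void *p)
    {
        free(p);
        sem_post(&pool);
    }

    static void alloc_buffer_pages(void **out, unsigned nr)
    {
        for (unsigned i = 0; i < nr; i++) {
            out[i] = pool_alloc(false);
            if (out[i])
                continue;
            while (i)                   /* opportunistic pass failed: give all back */
                pool_free(out[--i]);
            pthread_mutex_lock(&alloc_lock);    /* one blocking allocator at a time */
            for (i = 0; i < nr; i++)
                out[i] = pool_alloc(true);
            pthread_mutex_unlock(&alloc_lock);
            return;
        }
    }

Two processes that each want 256 of the 256 pages can no longer interleave to a standstill: whichever one falls back first holds nothing while it sleeps, because it frees its partial set before taking the mutex.
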
@@ -1020,7 +1038,6 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
1020 io->base_bio = bio; 1038 io->base_bio = bio;
1021 io->sector = sector; 1039 io->sector = sector;
1022 io->error = 0; 1040 io->error = 0;
1023 io->base_io = NULL;
1024 io->ctx.req = NULL; 1041 io->ctx.req = NULL;
1025 atomic_set(&io->io_pending, 0); 1042 atomic_set(&io->io_pending, 0);
1026} 1043}
@@ -1033,13 +1050,11 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
1033/* 1050/*
1034 * One of the bios was finished. Check for completion of 1051 * One of the bios was finished. Check for completion of
1035 * the whole request and correctly clean up the buffer. 1052 * the whole request and correctly clean up the buffer.
1036 * If base_io is set, wait for the last fragment to complete.
1037 */ 1053 */
1038static void crypt_dec_pending(struct dm_crypt_io *io) 1054static void crypt_dec_pending(struct dm_crypt_io *io)
1039{ 1055{
1040 struct crypt_config *cc = io->cc; 1056 struct crypt_config *cc = io->cc;
1041 struct bio *base_bio = io->base_bio; 1057 struct bio *base_bio = io->base_bio;
1042 struct dm_crypt_io *base_io = io->base_io;
1043 int error = io->error; 1058 int error = io->error;
1044 1059
1045 if (!atomic_dec_and_test(&io->io_pending)) 1060 if (!atomic_dec_and_test(&io->io_pending))
@@ -1047,16 +1062,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
1047 1062
1048 if (io->ctx.req) 1063 if (io->ctx.req)
1049 crypt_free_req(cc, io->ctx.req, base_bio); 1064 crypt_free_req(cc, io->ctx.req, base_bio);
1050 if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size)) 1065
1051 mempool_free(io, cc->io_pool); 1066 bio_endio(base_bio, error);
1052
1053 if (likely(!base_io))
1054 bio_endio(base_bio, error);
1055 else {
1056 if (error && !base_io->error)
1057 base_io->error = error;
1058 crypt_dec_pending(base_io);
1059 }
1060} 1067}
1061 1068
1062/* 1069/*
@@ -1138,37 +1145,97 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1138 return 0; 1145 return 0;
1139} 1146}
1140 1147
1148static void kcryptd_io_read_work(struct work_struct *work)
1149{
1150 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1151
1152 crypt_inc_pending(io);
1153 if (kcryptd_io_read(io, GFP_NOIO))
1154 io->error = -ENOMEM;
1155 crypt_dec_pending(io);
1156}
1157
1158static void kcryptd_queue_read(struct dm_crypt_io *io)
1159{
1160 struct crypt_config *cc = io->cc;
1161
1162 INIT_WORK(&io->work, kcryptd_io_read_work);
1163 queue_work(cc->io_queue, &io->work);
1164}
1165
1141static void kcryptd_io_write(struct dm_crypt_io *io) 1166static void kcryptd_io_write(struct dm_crypt_io *io)
1142{ 1167{
1143 struct bio *clone = io->ctx.bio_out; 1168 struct bio *clone = io->ctx.bio_out;
1169
1144 generic_make_request(clone); 1170 generic_make_request(clone);
1145} 1171}
1146 1172
1147static void kcryptd_io(struct work_struct *work) 1173#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
1174
1175static int dmcrypt_write(void *data)
1148{ 1176{
1149 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); 1177 struct crypt_config *cc = data;
1178 struct dm_crypt_io *io;
1150 1179
1151 if (bio_data_dir(io->base_bio) == READ) { 1180 while (1) {
1152 crypt_inc_pending(io); 1181 struct rb_root write_tree;
1153 if (kcryptd_io_read(io, GFP_NOIO)) 1182 struct blk_plug plug;
1154 io->error = -ENOMEM;
1155 crypt_dec_pending(io);
1156 } else
1157 kcryptd_io_write(io);
1158}
1159 1183
1160static void kcryptd_queue_io(struct dm_crypt_io *io) 1184 DECLARE_WAITQUEUE(wait, current);
1161{
1162 struct crypt_config *cc = io->cc;
1163 1185
1164 INIT_WORK(&io->work, kcryptd_io); 1186 spin_lock_irq(&cc->write_thread_wait.lock);
1165 queue_work(cc->io_queue, &io->work); 1187continue_locked:
1188
1189 if (!RB_EMPTY_ROOT(&cc->write_tree))
1190 goto pop_from_list;
1191
1192 __set_current_state(TASK_INTERRUPTIBLE);
1193 __add_wait_queue(&cc->write_thread_wait, &wait);
1194
1195 spin_unlock_irq(&cc->write_thread_wait.lock);
1196
1197 if (unlikely(kthread_should_stop())) {
1198 set_task_state(current, TASK_RUNNING);
1199 remove_wait_queue(&cc->write_thread_wait, &wait);
1200 break;
1201 }
1202
1203 schedule();
1204
1205 set_task_state(current, TASK_RUNNING);
1206 spin_lock_irq(&cc->write_thread_wait.lock);
1207 __remove_wait_queue(&cc->write_thread_wait, &wait);
1208 goto continue_locked;
1209
1210pop_from_list:
1211 write_tree = cc->write_tree;
1212 cc->write_tree = RB_ROOT;
1213 spin_unlock_irq(&cc->write_thread_wait.lock);
1214
1215 BUG_ON(rb_parent(write_tree.rb_node));
1216
1217 /*
1218 * Note: we cannot walk the tree here with rb_next because
1219 * the structures may be freed when kcryptd_io_write is called.
1220 */
1221 blk_start_plug(&plug);
1222 do {
1223 io = crypt_io_from_node(rb_first(&write_tree));
1224 rb_erase(&io->rb_node, &write_tree);
1225 kcryptd_io_write(io);
1226 } while (!RB_EMPTY_ROOT(&write_tree));
1227 blk_finish_plug(&plug);
1228 }
1229 return 0;
1166} 1230}
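
dmcrypt_write() drains the tree in one motion: detach the whole rb_root under the lock, then issue every bio with no lock held and without touching a node after it may have been freed. A condensed pthread rendition of that detach-then-process idiom, with a plain list standing in for the rbtree and illustrative names throughout:

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t nonempty = PTHREAD_COND_INITIALIZER;
    static struct node *pending;

    static void submit(struct node *n) { free(n); }     /* stand-in; may free n */

    static void *writer_thread(void *arg)
    {
        (void)arg;
        for (;;) {
            pthread_mutex_lock(&lock);
            while (!pending)
                pthread_cond_wait(&nonempty, &lock);
            struct node *batch = pending;       /* steal the entire set */
            pending = NULL;
            pthread_mutex_unlock(&lock);

            while (batch) {                     /* no lock held here */
                struct node *n = batch;
                batch = n->next;    /* read the link first: submit() may free n */
                submit(n);
            }
        }
        return NULL;
    }

A producer links its node in and signals nonempty under the same lock, which is the role wake_up_locked() plays in the submit path below.
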
1167 1231
1168static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) 1232static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
1169{ 1233{
1170 struct bio *clone = io->ctx.bio_out; 1234 struct bio *clone = io->ctx.bio_out;
1171 struct crypt_config *cc = io->cc; 1235 struct crypt_config *cc = io->cc;
1236 unsigned long flags;
1237 sector_t sector;
1238 struct rb_node **rbp, *parent;
1172 1239
1173 if (unlikely(io->error < 0)) { 1240 if (unlikely(io->error < 0)) {
1174 crypt_free_buffer_pages(cc, clone); 1241 crypt_free_buffer_pages(cc, clone);
@@ -1182,20 +1249,34 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
1182 1249
1183 clone->bi_iter.bi_sector = cc->start + io->sector; 1250 clone->bi_iter.bi_sector = cc->start + io->sector;
1184 1251
1185 if (async) 1252 if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
1186 kcryptd_queue_io(io);
1187 else
1188 generic_make_request(clone); 1253 generic_make_request(clone);
1254 return;
1255 }
1256
1257 spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
1258 rbp = &cc->write_tree.rb_node;
1259 parent = NULL;
1260 sector = io->sector;
1261 while (*rbp) {
1262 parent = *rbp;
1263 if (sector < crypt_io_from_node(parent)->sector)
1264 rbp = &(*rbp)->rb_left;
1265 else
1266 rbp = &(*rbp)->rb_right;
1267 }
1268 rb_link_node(&io->rb_node, parent, rbp);
1269 rb_insert_color(&io->rb_node, &cc->write_tree);
1270
1271 wake_up_locked(&cc->write_thread_wait);
1272 spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
1189} 1273}
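
Two details above are easy to miss: the waitqueue's internal spinlock doubles as the tree lock, so one lock round-trip covers both the insert and the wake-up, and equal sectors always descend to the right, keeping bios for the same sector in submission order. The same stable-ordering rule on a plain sorted list, as a sketch:

    struct io { unsigned long sector; struct io *next; };

    /* Walk past every entry with sector <= the new one, so duplicates
     * queue behind existing entries: the list analogue of "equal keys
     * go to the right subtree". */
    static void insert_sorted(struct io **head, struct io *io)
    {
        while (*head && (*head)->sector <= io->sector)
            head = &(*head)->next;
        io->next = *head;
        *head = io;
    }
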
1190 1274
1191static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) 1275static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
1192{ 1276{
1193 struct crypt_config *cc = io->cc; 1277 struct crypt_config *cc = io->cc;
1194 struct bio *clone; 1278 struct bio *clone;
1195 struct dm_crypt_io *new_io;
1196 int crypt_finished; 1279 int crypt_finished;
1197 unsigned out_of_pages = 0;
1198 unsigned remaining = io->base_bio->bi_iter.bi_size;
1199 sector_t sector = io->sector; 1280 sector_t sector = io->sector;
1200 int r; 1281 int r;
1201 1282
@@ -1205,80 +1286,30 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
1205 crypt_inc_pending(io); 1286 crypt_inc_pending(io);
1206 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector); 1287 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
1207 1288
1208 /* 1289 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
1209 * The allocated buffers can be smaller than the whole bio, 1290 if (unlikely(!clone)) {
1210 * so repeat the whole process until all the data can be handled. 1291 io->error = -EIO;
1211 */ 1292 goto dec;
1212 while (remaining) { 1293 }
1213 clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
1214 if (unlikely(!clone)) {
1215 io->error = -ENOMEM;
1216 break;
1217 }
1218
1219 io->ctx.bio_out = clone;
1220 io->ctx.iter_out = clone->bi_iter;
1221
1222 remaining -= clone->bi_iter.bi_size;
1223 sector += bio_sectors(clone);
1224
1225 crypt_inc_pending(io);
1226
1227 r = crypt_convert(cc, &io->ctx);
1228 if (r < 0)
1229 io->error = -EIO;
1230
1231 crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
1232
1233 /* Encryption was already finished, submit io now */
1234 if (crypt_finished) {
1235 kcryptd_crypt_write_io_submit(io, 0);
1236
1237 /*
1238 * If there was an error, do not try next fragments.
1239 * For async, error is processed in async handler.
1240 */
1241 if (unlikely(r < 0))
1242 break;
1243 1294
1244 io->sector = sector; 1295 io->ctx.bio_out = clone;
1245 } 1296 io->ctx.iter_out = clone->bi_iter;
1246 1297
1247 /* 1298 sector += bio_sectors(clone);
1248 * Out of memory -> run queues
1249 * But don't wait if split was due to the io size restriction
1250 */
1251 if (unlikely(out_of_pages))
1252 congestion_wait(BLK_RW_ASYNC, HZ/100);
1253 1299
1254 /* 1300 crypt_inc_pending(io);
1255 * With async crypto it is unsafe to share the crypto context 1301 r = crypt_convert(cc, &io->ctx);
1256 * between fragments, so switch to a new dm_crypt_io structure. 1302 if (r)
1257 */ 1303 io->error = -EIO;
1258 if (unlikely(!crypt_finished && remaining)) { 1304 crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
1259 new_io = mempool_alloc(cc->io_pool, GFP_NOIO);
1260 crypt_io_init(new_io, io->cc, io->base_bio, sector);
1261 crypt_inc_pending(new_io);
1262 crypt_convert_init(cc, &new_io->ctx, NULL,
1263 io->base_bio, sector);
1264 new_io->ctx.iter_in = io->ctx.iter_in;
1265
1266 /*
1267 * Fragments after the first use the base_io
1268 * pending count.
1269 */
1270 if (!io->base_io)
1271 new_io->base_io = io;
1272 else {
1273 new_io->base_io = io->base_io;
1274 crypt_inc_pending(io->base_io);
1275 crypt_dec_pending(io);
1276 }
1277 1305
1278 io = new_io; 1306 /* Encryption was already finished, submit io now */
1279 } 1307 if (crypt_finished) {
1308 kcryptd_crypt_write_io_submit(io, 0);
1309 io->sector = sector;
1280 } 1310 }
1281 1311
1312dec:
1282 crypt_dec_pending(io); 1313 crypt_dec_pending(io);
1283} 1314}
1284 1315
@@ -1481,6 +1512,9 @@ static void crypt_dtr(struct dm_target *ti)
1481 if (!cc) 1512 if (!cc)
1482 return; 1513 return;
1483 1514
1515 if (cc->write_thread)
1516 kthread_stop(cc->write_thread);
1517
1484 if (cc->io_queue) 1518 if (cc->io_queue)
1485 destroy_workqueue(cc->io_queue); 1519 destroy_workqueue(cc->io_queue);
1486 if (cc->crypt_queue) 1520 if (cc->crypt_queue)
@@ -1495,8 +1529,6 @@ static void crypt_dtr(struct dm_target *ti)
1495 mempool_destroy(cc->page_pool); 1529 mempool_destroy(cc->page_pool);
1496 if (cc->req_pool) 1530 if (cc->req_pool)
1497 mempool_destroy(cc->req_pool); 1531 mempool_destroy(cc->req_pool);
1498 if (cc->io_pool)
1499 mempool_destroy(cc->io_pool);
1500 1532
1501 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) 1533 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
1502 cc->iv_gen_ops->dtr(cc); 1534 cc->iv_gen_ops->dtr(cc);
@@ -1688,7 +1720,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1688 char dummy; 1720 char dummy;
1689 1721
1690 static struct dm_arg _args[] = { 1722 static struct dm_arg _args[] = {
1691 {0, 1, "Invalid number of feature args"}, 1723 {0, 3, "Invalid number of feature args"},
1692 }; 1724 };
1693 1725
1694 if (argc < 5) { 1726 if (argc < 5) {
@@ -1710,13 +1742,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1710 if (ret < 0) 1742 if (ret < 0)
1711 goto bad; 1743 goto bad;
1712 1744
1713 ret = -ENOMEM;
1714 cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
1715 if (!cc->io_pool) {
1716 ti->error = "Cannot allocate crypt io mempool";
1717 goto bad;
1718 }
1719
1720 cc->dmreq_start = sizeof(struct ablkcipher_request); 1745 cc->dmreq_start = sizeof(struct ablkcipher_request);
1721 cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc)); 1746 cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
1722 cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request)); 1747 cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
@@ -1734,6 +1759,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1734 iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc)); 1759 iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
1735 } 1760 }
1736 1761
1762 ret = -ENOMEM;
1737 cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + 1763 cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
1738 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size); 1764 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
1739 if (!cc->req_pool) { 1765 if (!cc->req_pool) {
@@ -1746,7 +1772,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1746 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size, 1772 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
1747 ARCH_KMALLOC_MINALIGN); 1773 ARCH_KMALLOC_MINALIGN);
1748 1774
1749 cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); 1775 cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
1750 if (!cc->page_pool) { 1776 if (!cc->page_pool) {
1751 ti->error = "Cannot allocate page mempool"; 1777 ti->error = "Cannot allocate page mempool";
1752 goto bad; 1778 goto bad;
@@ -1758,6 +1784,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1758 goto bad; 1784 goto bad;
1759 } 1785 }
1760 1786
1787 mutex_init(&cc->bio_alloc_lock);
1788
1761 ret = -EINVAL; 1789 ret = -EINVAL;
1762 if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) { 1790 if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
1763 ti->error = "Invalid iv_offset sector"; 1791 ti->error = "Invalid iv_offset sector";
@@ -1788,15 +1816,26 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1788 if (ret) 1816 if (ret)
1789 goto bad; 1817 goto bad;
1790 1818
1791 opt_string = dm_shift_arg(&as); 1819 while (opt_params--) {
1820 opt_string = dm_shift_arg(&as);
1821 if (!opt_string) {
1822 ti->error = "Not enough feature arguments";
1823 goto bad;
1824 }
1792 1825
1793 if (opt_params == 1 && opt_string && 1826 if (!strcasecmp(opt_string, "allow_discards"))
1794 !strcasecmp(opt_string, "allow_discards")) 1827 ti->num_discard_bios = 1;
1795 ti->num_discard_bios = 1; 1828
1796 else if (opt_params) { 1829 else if (!strcasecmp(opt_string, "same_cpu_crypt"))
1797 ret = -EINVAL; 1830 set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
1798 ti->error = "Invalid feature arguments"; 1831
1799 goto bad; 1832 else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
1833 set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
1834
1835 else {
1836 ti->error = "Invalid feature arguments";
1837 goto bad;
1838 }
1800 } 1839 }
1801 } 1840 }
1802 1841
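
With this loop the optional-parameter block of a dm-crypt table can carry any combination of the three feature strings, preceded by their count; a count that exceeds the strings actually supplied now fails with "Not enough feature arguments" instead of being misparsed. A plausible table line, with key and geometry as placeholders:

    0 409600 crypt aes-xts-plain64 <key_in_hex> 0 /dev/sdb 0 3 allow_discards same_cpu_crypt submit_from_crypt_cpus
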
@@ -1807,13 +1846,28 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1807 goto bad; 1846 goto bad;
1808 } 1847 }
1809 1848
1810 cc->crypt_queue = alloc_workqueue("kcryptd", 1849 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
1811 WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1); 1850 cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
1851 else
1852 cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
1853 num_online_cpus());
1812 if (!cc->crypt_queue) { 1854 if (!cc->crypt_queue) {
1813 ti->error = "Couldn't create kcryptd queue"; 1855 ti->error = "Couldn't create kcryptd queue";
1814 goto bad; 1856 goto bad;
1815 } 1857 }
1816 1858
1859 init_waitqueue_head(&cc->write_thread_wait);
1860 cc->write_tree = RB_ROOT;
1861
1862 cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
1863 if (IS_ERR(cc->write_thread)) {
1864 ret = PTR_ERR(cc->write_thread);
1865 cc->write_thread = NULL;
1866 ti->error = "Couldn't spawn write thread";
1867 goto bad;
1868 }
1869 wake_up_process(cc->write_thread);
1870
1817 ti->num_flush_bios = 1; 1871 ti->num_flush_bios = 1;
1818 ti->discard_zeroes_data_unsupported = true; 1872 ti->discard_zeroes_data_unsupported = true;
1819 1873
@@ -1848,7 +1902,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
1848 1902
1849 if (bio_data_dir(io->base_bio) == READ) { 1903 if (bio_data_dir(io->base_bio) == READ) {
1850 if (kcryptd_io_read(io, GFP_NOWAIT)) 1904 if (kcryptd_io_read(io, GFP_NOWAIT))
1851 kcryptd_queue_io(io); 1905 kcryptd_queue_read(io);
1852 } else 1906 } else
1853 kcryptd_queue_crypt(io); 1907 kcryptd_queue_crypt(io);
1854 1908
@@ -1860,6 +1914,7 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
1860{ 1914{
1861 struct crypt_config *cc = ti->private; 1915 struct crypt_config *cc = ti->private;
1862 unsigned i, sz = 0; 1916 unsigned i, sz = 0;
1917 int num_feature_args = 0;
1863 1918
1864 switch (type) { 1919 switch (type) {
1865 case STATUSTYPE_INFO: 1920 case STATUSTYPE_INFO:
@@ -1878,8 +1933,18 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
1878 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, 1933 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
1879 cc->dev->name, (unsigned long long)cc->start); 1934 cc->dev->name, (unsigned long long)cc->start);
1880 1935
1881 if (ti->num_discard_bios) 1936 num_feature_args += !!ti->num_discard_bios;
1882 DMEMIT(" 1 allow_discards"); 1937 num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
1938 num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
1939 if (num_feature_args) {
1940 DMEMIT(" %d", num_feature_args);
1941 if (ti->num_discard_bios)
1942 DMEMIT(" allow_discards");
1943 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
1944 DMEMIT(" same_cpu_crypt");
1945 if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
1946 DMEMIT(" submit_from_crypt_cpus");
1947 }
1883 1948
1884 break; 1949 break;
1885 } 1950 }
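
num_feature_args is recomputed from live state, so the emitted count always matches the strings that follow it. For a mapping constructed with discards and same-CPU crypto, the STATUSTYPE_TABLE tail would read roughly as follows (key and device are placeholders):

    aes-xts-plain64 <key_in_hex> 0 8:16 0 2 allow_discards same_cpu_crypt
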
@@ -1976,7 +2041,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
1976 2041
1977static struct target_type crypt_target = { 2042static struct target_type crypt_target = {
1978 .name = "crypt", 2043 .name = "crypt",
1979 .version = {1, 13, 0}, 2044 .version = {1, 14, 0},
1980 .module = THIS_MODULE, 2045 .module = THIS_MODULE,
1981 .ctr = crypt_ctr, 2046 .ctr = crypt_ctr,
1982 .dtr = crypt_dtr, 2047 .dtr = crypt_dtr,
@@ -1994,15 +2059,9 @@ static int __init dm_crypt_init(void)
1994{ 2059{
1995 int r; 2060 int r;
1996 2061
1997 _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
1998 if (!_crypt_io_pool)
1999 return -ENOMEM;
2000
2001 r = dm_register_target(&crypt_target); 2062 r = dm_register_target(&crypt_target);
2002 if (r < 0) { 2063 if (r < 0)
2003 DMERR("register failed %d", r); 2064 DMERR("register failed %d", r);
2004 kmem_cache_destroy(_crypt_io_pool);
2005 }
2006 2065
2007 return r; 2066 return r;
2008} 2067}
@@ -2010,7 +2069,6 @@ static int __init dm_crypt_init(void)
2010static void __exit dm_crypt_exit(void) 2069static void __exit dm_crypt_exit(void)
2011{ 2070{
2012 dm_unregister_target(&crypt_target); 2071 dm_unregister_target(&crypt_target);
2013 kmem_cache_destroy(_crypt_io_pool);
2014} 2072}
2015 2073
2016module_init(dm_crypt_init); 2074module_init(dm_crypt_init);
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index c09359db3a90..37de0173b6d2 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -290,6 +290,12 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
290 unsigned short logical_block_size = queue_logical_block_size(q); 290 unsigned short logical_block_size = queue_logical_block_size(q);
291 sector_t num_sectors; 291 sector_t num_sectors;
292 292
293 /* Reject unsupported discard requests */
294 if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) {
295 dec_count(io, region, -EOPNOTSUPP);
296 return;
297 }
298
293 /* 299 /*
294 * where->count may be zero if rw holds a flush and we need to 300 * where->count may be zero if rw holds a flush and we need to
295 * send a zero-sized flush. 301 * send a zero-sized flush.
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 7dfdb5c746d6..089d62751f7f 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -604,6 +604,15 @@ static void write_callback(unsigned long error, void *context)
604 return; 604 return;
605 } 605 }
606 606
607 /*
608 * If the bio is discard, return an error, but do not
609 * degrade the array.
610 */
611 if (bio->bi_rw & REQ_DISCARD) {
612 bio_endio(bio, -EOPNOTSUPP);
613 return;
614 }
615
607 for (i = 0; i < ms->nr_mirrors; i++) 616 for (i = 0; i < ms->nr_mirrors; i++)
608 if (test_bit(i, &error)) 617 if (test_bit(i, &error))
609 fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR); 618 fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 864b03f47727..8b204ae216ab 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1432,8 +1432,6 @@ out:
1432 full_bio->bi_private = pe->full_bio_private; 1432 full_bio->bi_private = pe->full_bio_private;
1433 atomic_inc(&full_bio->bi_remaining); 1433 atomic_inc(&full_bio->bi_remaining);
1434 } 1434 }
1435 free_pending_exception(pe);
1436
1437 increment_pending_exceptions_done_count(); 1435 increment_pending_exceptions_done_count();
1438 1436
1439 up_write(&s->lock); 1437 up_write(&s->lock);
@@ -1450,6 +1448,8 @@ out:
1450 } 1448 }
1451 1449
1452 retry_origin_bios(s, origin_bios); 1450 retry_origin_bios(s, origin_bios);
1451
1452 free_pending_exception(pe);
1453} 1453}
1454 1454
1455static void commit_callback(void *context, int success) 1455static void commit_callback(void *context, int success)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ec1444f49de1..73f28802dc7a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2571,7 +2571,7 @@ int dm_setup_md_queue(struct mapped_device *md)
2571 return 0; 2571 return 0;
2572} 2572}
2573 2573
2574static struct mapped_device *dm_find_md(dev_t dev) 2574struct mapped_device *dm_get_md(dev_t dev)
2575{ 2575{
2576 struct mapped_device *md; 2576 struct mapped_device *md;
2577 unsigned minor = MINOR(dev); 2577 unsigned minor = MINOR(dev);
@@ -2582,12 +2582,15 @@ static struct mapped_device *dm_find_md(dev_t dev)
2582 spin_lock(&_minor_lock); 2582 spin_lock(&_minor_lock);
2583 2583
2584 md = idr_find(&_minor_idr, minor); 2584 md = idr_find(&_minor_idr, minor);
2585 if (md && (md == MINOR_ALLOCED || 2585 if (md) {
2586 (MINOR(disk_devt(dm_disk(md))) != minor) || 2586 if ((md == MINOR_ALLOCED ||
2587 dm_deleting_md(md) || 2587 (MINOR(disk_devt(dm_disk(md))) != minor) ||
2588 test_bit(DMF_FREEING, &md->flags))) { 2588 dm_deleting_md(md) ||
2589 md = NULL; 2589 test_bit(DMF_FREEING, &md->flags))) {
2590 goto out; 2590 md = NULL;
2591 goto out;
2592 }
2593 dm_get(md);
2591 } 2594 }
2592 2595
2593out: 2596out:
@@ -2595,16 +2598,6 @@ out:
2595 2598
2596 return md; 2599 return md;
2597} 2600}
2598
2599struct mapped_device *dm_get_md(dev_t dev)
2600{
2601 struct mapped_device *md = dm_find_md(dev);
2602
2603 if (md)
2604 dm_get(md);
2605
2606 return md;
2607}
2608EXPORT_SYMBOL_GPL(dm_get_md); 2601EXPORT_SYMBOL_GPL(dm_get_md);
2609 2602
2610void *dm_get_mdptr(struct mapped_device *md) 2603void *dm_get_mdptr(struct mapped_device *md)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c8d2bac4e28b..cadf9cc02b25 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2555,7 +2555,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
2555 return err ? err : len; 2555 return err ? err : len;
2556} 2556}
2557static struct rdev_sysfs_entry rdev_state = 2557static struct rdev_sysfs_entry rdev_state =
2558__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); 2558__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
2559 2559
2560static ssize_t 2560static ssize_t
2561errors_show(struct md_rdev *rdev, char *page) 2561errors_show(struct md_rdev *rdev, char *page)
@@ -3638,7 +3638,8 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len)
3638 return err ?: len; 3638 return err ?: len;
3639} 3639}
3640static struct md_sysfs_entry md_resync_start = 3640static struct md_sysfs_entry md_resync_start =
3641__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store); 3641__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
3642 resync_start_show, resync_start_store);
3642 3643
3643/* 3644/*
3644 * The array state can be: 3645 * The array state can be:
@@ -3851,7 +3852,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
3851 return err ?: len; 3852 return err ?: len;
3852} 3853}
3853static struct md_sysfs_entry md_array_state = 3854static struct md_sysfs_entry md_array_state =
3854__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 3855__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
3855 3856
3856static ssize_t 3857static ssize_t
3857max_corrected_read_errors_show(struct mddev *mddev, char *page) { 3858max_corrected_read_errors_show(struct mddev *mddev, char *page) {
@@ -4101,7 +4102,7 @@ out_unlock:
4101} 4102}
4102 4103
4103static struct md_sysfs_entry md_metadata = 4104static struct md_sysfs_entry md_metadata =
4104__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 4105__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
4105 4106
4106static ssize_t 4107static ssize_t
4107action_show(struct mddev *mddev, char *page) 4108action_show(struct mddev *mddev, char *page)
@@ -4189,7 +4190,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
4189} 4190}
4190 4191
4191static struct md_sysfs_entry md_scan_mode = 4192static struct md_sysfs_entry md_scan_mode =
4192__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 4193__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
4193 4194
4194static ssize_t 4195static ssize_t
4195last_sync_action_show(struct mddev *mddev, char *page) 4196last_sync_action_show(struct mddev *mddev, char *page)
@@ -4335,7 +4336,8 @@ sync_completed_show(struct mddev *mddev, char *page)
4335 return sprintf(page, "%llu / %llu\n", resync, max_sectors); 4336 return sprintf(page, "%llu / %llu\n", resync, max_sectors);
4336} 4337}
4337 4338
4338static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); 4339static struct md_sysfs_entry md_sync_completed =
4340 __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
4339 4341
4340static ssize_t 4342static ssize_t
4341min_sync_show(struct mddev *mddev, char *page) 4343min_sync_show(struct mddev *mddev, char *page)
diff --git a/drivers/md/persistent-data/Kconfig b/drivers/md/persistent-data/Kconfig
index 0c2dec7aec20..78c74bb71ba4 100644
--- a/drivers/md/persistent-data/Kconfig
+++ b/drivers/md/persistent-data/Kconfig
@@ -8,7 +8,7 @@ config DM_PERSISTENT_DATA
8 device-mapper targets such as the thin provisioning target. 8 device-mapper targets such as the thin provisioning target.
9 9
10config DM_DEBUG_BLOCK_STACK_TRACING 10config DM_DEBUG_BLOCK_STACK_TRACING
11 boolean "Keep stack trace of persistent data block lock holders" 11 bool "Keep stack trace of persistent data block lock holders"
12 depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA 12 depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA
13 select STACKTRACE 13 select STACKTRACE
14 ---help--- 14 ---help---
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index cfbf9617e465..ebb280a14325 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -78,7 +78,9 @@ static int sm_disk_count_is_more_than_one(struct dm_space_map *sm, dm_block_t b,
78 if (r) 78 if (r)
79 return r; 79 return r;
80 80
81 return count > 1; 81 *result = count > 1;
82
83 return 0;
82} 84}
83 85
84static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b, 86static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b,
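
The defect fixed here, reduced to its pattern: the predicate belongs in the out-parameter while the return value stays a 0/-errno status, but the old code returned the predicate itself, so callers never received the answer and could mistake a shared block for a failure. A self-contained restatement with illustrative names:

    #include <stdint.h>

    static uint32_t ref_count_of(uint64_t b) { return (uint32_t)(b & 3); }  /* stub lookup */

    static int count_is_more_than_one(uint64_t b, int *result)
    {
        uint32_t count = ref_count_of(b);

        *result = count > 1;    /* the answer travels here */
        return 0;               /* and this stays a status code */
    }
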
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4153da5d4011..d34e238afa54 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -560,7 +560,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
560 if (test_bit(WriteMostly, &rdev->flags)) { 560 if (test_bit(WriteMostly, &rdev->flags)) {
561 /* Don't balance among write-mostly, just 561 /* Don't balance among write-mostly, just
562 * use the first as a last resort */ 562 * use the first as a last resort */
563 if (best_disk < 0) { 563 if (best_dist_disk < 0) {
564 if (is_badblock(rdev, this_sector, sectors, 564 if (is_badblock(rdev, this_sector, sectors,
565 &first_bad, &bad_sectors)) { 565 &first_bad, &bad_sectors)) {
566 if (first_bad < this_sector) 566 if (first_bad < this_sector)
@@ -569,7 +569,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
569 best_good_sectors = first_bad - this_sector; 569 best_good_sectors = first_bad - this_sector;
570 } else 570 } else
571 best_good_sectors = sectors; 571 best_good_sectors = sectors;
572 best_disk = disk; 572 best_dist_disk = disk;
573 best_pending_disk = disk;
573 } 574 }
574 continue; 575 continue;
575 } 576 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e75d48c0421a..cd2f96b2c572 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5121,12 +5121,17 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int
5121 schedule_timeout_uninterruptible(1); 5121 schedule_timeout_uninterruptible(1);
5122 } 5122 }
5123 /* Need to check if array will still be degraded after recovery/resync 5123 /* Need to check if array will still be degraded after recovery/resync
5124 * We don't need to check the 'failed' flag as when that gets set, 5124 * Note in case of > 1 drive failures it's possible we're rebuilding
5125 * recovery aborts. 5125 * one drive while leaving another faulty drive in array.
5126 */ 5126 */
5127 for (i = 0; i < conf->raid_disks; i++) 5127 rcu_read_lock();
5128 if (conf->disks[i].rdev == NULL) 5128 for (i = 0; i < conf->raid_disks; i++) {
5129 struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
5130
5131 if (rdev == NULL || test_bit(Faulty, &rdev->flags))
5129 still_degraded = 1; 5132 still_degraded = 1;
5133 }
5134 rcu_read_unlock();
5130 5135
5131 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 5136 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
5132 5137
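
A userspace analogue of the fixed loop, using C11 atomics where the kernel uses ACCESS_ONCE() under rcu_read_lock(); the lifetime guarantee RCU additionally provides for the rdev itself has no cheap equivalent here and is simply assumed:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct rdev { atomic_bool faulty; };
    struct disk { _Atomic(struct rdev *) rdev; };

    /* Read each slot exactly once into a local and test through that
     * local, so a concurrent hot-replace cannot be observed half-way;
     * a missing or faulty device keeps the array degraded. */
    static bool still_degraded(struct disk *disks, int n)
    {
        for (int i = 0; i < n; i++) {
            struct rdev *rdev = atomic_load(&disks[i].rdev);

            if (!rdev || atomic_load(&rdev->faulty))
                return true;
        }
        return false;
    }
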
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 3a2604580164..d2a85cde68da 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -1111,7 +1111,7 @@ static int verify_addr(struct i2c_client *i2c)
1111 return 0; 1111 return 0;
1112} 1112}
1113 1113
1114static struct regmap_config pm860x_regmap_config = { 1114static const struct regmap_config pm860x_regmap_config = {
1115 .reg_bits = 8, 1115 .reg_bits = 8,
1116 .val_bits = 8, 1116 .val_bits = 8,
1117}; 1117};
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 2e6b7311fabc..38356e39adba 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -195,6 +195,18 @@ config MFD_DA9063
195 Additional drivers must be enabled in order to use the functionality 195 Additional drivers must be enabled in order to use the functionality
196 of the device. 196 of the device.
197 197
198config MFD_DA9150
199 tristate "Dialog Semiconductor DA9150 Charger Fuel-Gauge chip"
200 depends on I2C=y
201 select MFD_CORE
202 select REGMAP_I2C
203 select REGMAP_IRQ
204 help
205 This adds support for the DA9150 integrated charger and fuel-gauge
206 chip. This driver provides common support for accessing the device.
207 Additional drivers must be enabled in order to use the specific
208 features of the device.
209
198config MFD_DLN2 210config MFD_DLN2
199 tristate "Diolan DLN2 support" 211 tristate "Diolan DLN2 support"
200 select MFD_CORE 212 select MFD_CORE
@@ -417,6 +429,7 @@ config MFD_MAX14577
417config MFD_MAX77686 429config MFD_MAX77686
418 bool "Maxim Semiconductor MAX77686/802 PMIC Support" 430 bool "Maxim Semiconductor MAX77686/802 PMIC Support"
419 depends on I2C=y 431 depends on I2C=y
432 depends on OF
420 select MFD_CORE 433 select MFD_CORE
421 select REGMAP_I2C 434 select REGMAP_I2C
422 select REGMAP_IRQ 435 select REGMAP_IRQ
@@ -589,6 +602,20 @@ config MFD_PM8921_CORE
589 Say M here if you want to include support for PM8921 chip as a module. 602 Say M here if you want to include support for PM8921 chip as a module.
590 This will build a module called "pm8921-core". 603 This will build a module called "pm8921-core".
591 604
605config MFD_QCOM_RPM
606 tristate "Qualcomm Resource Power Manager (RPM)"
607 depends on ARCH_QCOM && OF
608 help
609 If you say yes to this option, support will be included for the
610 Resource Power Manager system found in the Qualcomm 8660, 8960 and
611 8064 based devices.
612
613 This is required to access many regulators, clocks and bus
614 frequencies controlled by the RPM on these devices.
615
616 Say M here if you want to include support for the Qualcomm RPM as a
617 module. This will build a module called "qcom_rpm".
618
592config MFD_SPMI_PMIC 619config MFD_SPMI_PMIC
593 tristate "Qualcomm SPMI PMICs" 620 tristate "Qualcomm SPMI PMICs"
594 depends on ARCH_QCOM || COMPILE_TEST 621 depends on ARCH_QCOM || COMPILE_TEST
@@ -623,6 +650,18 @@ config MFD_RTSX_PCI
623 types of memory cards, such as Memory Stick, Memory Stick Pro, 650 types of memory cards, such as Memory Stick, Memory Stick Pro,
624 Secure Digital and MultiMediaCard. 651 Secure Digital and MultiMediaCard.
625 652
653config MFD_RT5033
654 tristate "Richtek RT5033 Power Management IC"
655 depends on I2C=y
656 select MFD_CORE
657 select REGMAP_I2C
658 help
659 This adds support for the Richtek RT5033 Power Management IC,
660 including its I2C interface and core APIs. This driver provides
661 common support for accessing the device. The device supports multiple
662 sub-devices like charger, fuel gauge, flash LED, current source,
663 LDO and Buck.
664
626config MFD_RTSX_USB 665config MFD_RTSX_USB
627 tristate "Realtek USB card reader" 666 tristate "Realtek USB card reader"
628 depends on USB 667 depends on USB
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 53467e211381..19f3d744e3bd 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -113,7 +113,7 @@ obj-$(CONFIG_MFD_DA9055) += da9055.o
113 113
114da9063-objs := da9063-core.o da9063-irq.o da9063-i2c.o 114da9063-objs := da9063-core.o da9063-irq.o da9063-i2c.o
115obj-$(CONFIG_MFD_DA9063) += da9063.o 115obj-$(CONFIG_MFD_DA9063) += da9063.o
116 116obj-$(CONFIG_MFD_DA9150) += da9150-core.o
117obj-$(CONFIG_MFD_MAX14577) += max14577.o 117obj-$(CONFIG_MFD_MAX14577) += max14577.o
118obj-$(CONFIG_MFD_MAX77686) += max77686.o 118obj-$(CONFIG_MFD_MAX77686) += max77686.o
119obj-$(CONFIG_MFD_MAX77693) += max77693.o 119obj-$(CONFIG_MFD_MAX77693) += max77693.o
@@ -153,6 +153,7 @@ obj-$(CONFIG_MFD_SI476X_CORE) += si476x-core.o
153obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o 153obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o
154obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o omap-usb-tll.o 154obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o omap-usb-tll.o
155obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o ssbi.o 155obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o ssbi.o
156obj-$(CONFIG_MFD_QCOM_RPM) += qcom_rpm.o
156obj-$(CONFIG_MFD_SPMI_PMIC) += qcom-spmi-pmic.o 157obj-$(CONFIG_MFD_SPMI_PMIC) += qcom-spmi-pmic.o
157obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o 158obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
158obj-$(CONFIG_MFD_TPS65090) += tps65090.o 159obj-$(CONFIG_MFD_TPS65090) += tps65090.o
@@ -176,6 +177,7 @@ obj-$(CONFIG_MFD_IPAQ_MICRO) += ipaq-micro.o
176obj-$(CONFIG_MFD_MENF21BMC) += menf21bmc.o 177obj-$(CONFIG_MFD_MENF21BMC) += menf21bmc.o
177obj-$(CONFIG_MFD_HI6421_PMIC) += hi6421-pmic-core.o 178obj-$(CONFIG_MFD_HI6421_PMIC) += hi6421-pmic-core.o
178obj-$(CONFIG_MFD_DLN2) += dln2.o 179obj-$(CONFIG_MFD_DLN2) += dln2.o
180obj-$(CONFIG_MFD_RT5033) += rt5033.o
179 181
180intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o 182intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o
181obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o 183obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
diff --git a/drivers/mfd/da9063-core.c b/drivers/mfd/da9063-core.c
index f38bc98a3c57..facd3610ac77 100644
--- a/drivers/mfd/da9063-core.c
+++ b/drivers/mfd/da9063-core.c
@@ -86,6 +86,7 @@ static const struct mfd_cell da9063_devs[] = {
86 }, 86 },
87 { 87 {
88 .name = DA9063_DRVNAME_WATCHDOG, 88 .name = DA9063_DRVNAME_WATCHDOG,
89 .of_compatible = "dlg,da9063-watchdog",
89 }, 90 },
90 { 91 {
91 .name = DA9063_DRVNAME_HWMON, 92 .name = DA9063_DRVNAME_HWMON,
@@ -101,6 +102,7 @@ static const struct mfd_cell da9063_devs[] = {
101 .name = DA9063_DRVNAME_RTC, 102 .name = DA9063_DRVNAME_RTC,
102 .num_resources = ARRAY_SIZE(da9063_rtc_resources), 103 .num_resources = ARRAY_SIZE(da9063_rtc_resources),
103 .resources = da9063_rtc_resources, 104 .resources = da9063_rtc_resources,
105 .of_compatible = "dlg,da9063-rtc",
104 }, 106 },
105 { 107 {
106 .name = DA9063_DRVNAME_VIBRATION, 108 .name = DA9063_DRVNAME_VIBRATION,
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
index 21fd8d9a217b..6f3a7c0001f9 100644
--- a/drivers/mfd/da9063-i2c.c
+++ b/drivers/mfd/da9063-i2c.c
@@ -25,6 +25,9 @@
25#include <linux/mfd/da9063/pdata.h> 25#include <linux/mfd/da9063/pdata.h>
26#include <linux/mfd/da9063/registers.h> 26#include <linux/mfd/da9063/registers.h>
27 27
28#include <linux/of.h>
29#include <linux/regulator/of_regulator.h>
30
28static const struct regmap_range da9063_ad_readable_ranges[] = { 31static const struct regmap_range da9063_ad_readable_ranges[] = {
29 { 32 {
30 .range_min = DA9063_REG_PAGE_CON, 33 .range_min = DA9063_REG_PAGE_CON,
@@ -203,6 +206,11 @@ static struct regmap_config da9063_regmap_config = {
203 .cache_type = REGCACHE_RBTREE, 206 .cache_type = REGCACHE_RBTREE,
204}; 207};
205 208
209static const struct of_device_id da9063_dt_ids[] = {
210 { .compatible = "dlg,da9063", },
211 { }
212};
213MODULE_DEVICE_TABLE(of, da9063_dt_ids);
206static int da9063_i2c_probe(struct i2c_client *i2c, 214static int da9063_i2c_probe(struct i2c_client *i2c,
207 const struct i2c_device_id *id) 215 const struct i2c_device_id *id)
208{ 216{
@@ -257,6 +265,7 @@ static struct i2c_driver da9063_i2c_driver = {
257 .driver = { 265 .driver = {
258 .name = "da9063", 266 .name = "da9063",
259 .owner = THIS_MODULE, 267 .owner = THIS_MODULE,
268 .of_match_table = of_match_ptr(da9063_dt_ids),
260 }, 269 },
261 .probe = da9063_i2c_probe, 270 .probe = da9063_i2c_probe,
262 .remove = da9063_i2c_remove, 271 .remove = da9063_i2c_remove,
diff --git a/drivers/mfd/da9150-core.c b/drivers/mfd/da9150-core.c
new file mode 100644
index 000000000000..4d757b97ef9a
--- /dev/null
+++ b/drivers/mfd/da9150-core.c
@@ -0,0 +1,413 @@
1/*
2 * DA9150 Core MFD Driver
3 *
4 * Copyright (c) 2014 Dialog Semiconductor
5 *
6 * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/i2c.h>
18#include <linux/regmap.h>
19#include <linux/slab.h>
20#include <linux/irq.h>
21#include <linux/interrupt.h>
22#include <linux/mfd/core.h>
23#include <linux/mfd/da9150/core.h>
24#include <linux/mfd/da9150/registers.h>
25
26static bool da9150_volatile_reg(struct device *dev, unsigned int reg)
27{
28 switch (reg) {
29 case DA9150_PAGE_CON:
30 case DA9150_STATUS_A:
31 case DA9150_STATUS_B:
32 case DA9150_STATUS_C:
33 case DA9150_STATUS_D:
34 case DA9150_STATUS_E:
35 case DA9150_STATUS_F:
36 case DA9150_STATUS_G:
37 case DA9150_STATUS_H:
38 case DA9150_STATUS_I:
39 case DA9150_STATUS_J:
40 case DA9150_STATUS_K:
41 case DA9150_STATUS_L:
42 case DA9150_STATUS_N:
43 case DA9150_FAULT_LOG_A:
44 case DA9150_FAULT_LOG_B:
45 case DA9150_EVENT_E:
46 case DA9150_EVENT_F:
47 case DA9150_EVENT_G:
48 case DA9150_EVENT_H:
49 case DA9150_CONTROL_B:
50 case DA9150_CONTROL_C:
51 case DA9150_GPADC_MAN:
52 case DA9150_GPADC_RES_A:
53 case DA9150_GPADC_RES_B:
54 case DA9150_ADETVB_CFG_C:
55 case DA9150_ADETD_STAT:
56 case DA9150_ADET_CMPSTAT:
57 case DA9150_ADET_CTRL_A:
58 case DA9150_PPR_TCTR_B:
59 case DA9150_COREBTLD_STAT_A:
60 case DA9150_CORE_DATA_A:
61 case DA9150_CORE_DATA_B:
62 case DA9150_CORE_DATA_C:
63 case DA9150_CORE_DATA_D:
64 case DA9150_CORE2WIRE_STAT_A:
65 case DA9150_FW_CTRL_C:
66 case DA9150_FG_CTRL_B:
67 case DA9150_FW_CTRL_B:
68 case DA9150_GPADC_CMAN:
69 case DA9150_GPADC_CRES_A:
70 case DA9150_GPADC_CRES_B:
71 case DA9150_CC_ICHG_RES_A:
72 case DA9150_CC_ICHG_RES_B:
73 case DA9150_CC_IAVG_RES_A:
74 case DA9150_CC_IAVG_RES_B:
75 case DA9150_TAUX_CTRL_A:
76 case DA9150_TAUX_VALUE_H:
77 case DA9150_TAUX_VALUE_L:
78 case DA9150_TBAT_RES_A:
79 case DA9150_TBAT_RES_B:
80 return true;
81 default:
82 return false;
83 }
84}
85
86static const struct regmap_range_cfg da9150_range_cfg[] = {
87 {
88 .range_min = DA9150_PAGE_CON,
89 .range_max = DA9150_TBAT_RES_B,
90 .selector_reg = DA9150_PAGE_CON,
91 .selector_mask = DA9150_I2C_PAGE_MASK,
92 .selector_shift = DA9150_I2C_PAGE_SHIFT,
93 .window_start = 0,
94 .window_len = 256,
95 },
96};
97
98static struct regmap_config da9150_regmap_config = {
99 .reg_bits = 8,
100 .val_bits = 8,
101 .ranges = da9150_range_cfg,
102 .num_ranges = ARRAY_SIZE(da9150_range_cfg),
103 .max_register = DA9150_TBAT_RES_B,
104
105 .cache_type = REGCACHE_RBTREE,
106
107 .volatile_reg = da9150_volatile_reg,
108};
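
With window_start 0 and window_len 256, regmap turns a flat register number into a page select plus an in-window offset; assuming range_min (DA9150_PAGE_CON) is register 0, the arithmetic reduces to a divide and a remainder. A sketch of what regmap performs on every access through this range:

    #include <stdint.h>

    #define WINDOW_LEN 256u

    struct paged_access {
        uint8_t page;    /* written into PAGE_CON via selector mask/shift */
        uint8_t offset;  /* the 8-bit register actually addressed on I2C */
    };

    static struct paged_access resolve(uint16_t reg)
    {
        return (struct paged_access){
            .page   = (uint8_t)(reg / WINDOW_LEN),
            .offset = (uint8_t)(reg % WINDOW_LEN),
        };
    }
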
109
110u8 da9150_reg_read(struct da9150 *da9150, u16 reg)
111{
112 int val, ret;
113
114 ret = regmap_read(da9150->regmap, reg, &val);
115 if (ret)
116 dev_err(da9150->dev, "Failed to read from reg 0x%x: %d\n",
117 reg, ret);
118
119 return (u8) val;
120}
121EXPORT_SYMBOL_GPL(da9150_reg_read);
122
123void da9150_reg_write(struct da9150 *da9150, u16 reg, u8 val)
124{
125 int ret;
126
127 ret = regmap_write(da9150->regmap, reg, val);
128 if (ret)
129 dev_err(da9150->dev, "Failed to write to reg 0x%x: %d\n",
130 reg, ret);
131}
132EXPORT_SYMBOL_GPL(da9150_reg_write);
133
134void da9150_set_bits(struct da9150 *da9150, u16 reg, u8 mask, u8 val)
135{
136 int ret;
137
138 ret = regmap_update_bits(da9150->regmap, reg, mask, val);
139 if (ret)
140 dev_err(da9150->dev, "Failed to set bits in reg 0x%x: %d\n",
141 reg, ret);
142}
143EXPORT_SYMBOL_GPL(da9150_set_bits);
144
145void da9150_bulk_read(struct da9150 *da9150, u16 reg, int count, u8 *buf)
146{
147 int ret;
148
149 ret = regmap_bulk_read(da9150->regmap, reg, buf, count);
150 if (ret)
151 dev_err(da9150->dev, "Failed to bulk read from reg 0x%x: %d\n",
152 reg, ret);
153}
154EXPORT_SYMBOL_GPL(da9150_bulk_read);
155
156void da9150_bulk_write(struct da9150 *da9150, u16 reg, int count, const u8 *buf)
157{
158 int ret;
159
160 ret = regmap_raw_write(da9150->regmap, reg, buf, count);
161 if (ret)
162 dev_err(da9150->dev, "Failed to bulk write to reg 0x%x %d\n",
163 reg, ret);
164}
165EXPORT_SYMBOL_GPL(da9150_bulk_write);
166
167static struct regmap_irq da9150_irqs[] = {
168 [DA9150_IRQ_VBUS] = {
169 .reg_offset = 0,
170 .mask = DA9150_E_VBUS_MASK,
171 },
172 [DA9150_IRQ_CHG] = {
173 .reg_offset = 0,
174 .mask = DA9150_E_CHG_MASK,
175 },
176 [DA9150_IRQ_TCLASS] = {
177 .reg_offset = 0,
178 .mask = DA9150_E_TCLASS_MASK,
179 },
180 [DA9150_IRQ_TJUNC] = {
181 .reg_offset = 0,
182 .mask = DA9150_E_TJUNC_MASK,
183 },
184 [DA9150_IRQ_VFAULT] = {
185 .reg_offset = 0,
186 .mask = DA9150_E_VFAULT_MASK,
187 },
188 [DA9150_IRQ_CONF] = {
189 .reg_offset = 1,
190 .mask = DA9150_E_CONF_MASK,
191 },
192 [DA9150_IRQ_DAT] = {
193 .reg_offset = 1,
194 .mask = DA9150_E_DAT_MASK,
195 },
196 [DA9150_IRQ_DTYPE] = {
197 .reg_offset = 1,
198 .mask = DA9150_E_DTYPE_MASK,
199 },
200 [DA9150_IRQ_ID] = {
201 .reg_offset = 1,
202 .mask = DA9150_E_ID_MASK,
203 },
204 [DA9150_IRQ_ADP] = {
205 .reg_offset = 1,
206 .mask = DA9150_E_ADP_MASK,
207 },
208 [DA9150_IRQ_SESS_END] = {
209 .reg_offset = 1,
210 .mask = DA9150_E_SESS_END_MASK,
211 },
212 [DA9150_IRQ_SESS_VLD] = {
213 .reg_offset = 1,
214 .mask = DA9150_E_SESS_VLD_MASK,
215 },
216 [DA9150_IRQ_FG] = {
217 .reg_offset = 2,
218 .mask = DA9150_E_FG_MASK,
219 },
220 [DA9150_IRQ_GP] = {
221 .reg_offset = 2,
222 .mask = DA9150_E_GP_MASK,
223 },
224 [DA9150_IRQ_TBAT] = {
225 .reg_offset = 2,
226 .mask = DA9150_E_TBAT_MASK,
227 },
228 [DA9150_IRQ_GPIOA] = {
229 .reg_offset = 2,
230 .mask = DA9150_E_GPIOA_MASK,
231 },
232 [DA9150_IRQ_GPIOB] = {
233 .reg_offset = 2,
234 .mask = DA9150_E_GPIOB_MASK,
235 },
236 [DA9150_IRQ_GPIOC] = {
237 .reg_offset = 2,
238 .mask = DA9150_E_GPIOC_MASK,
239 },
240 [DA9150_IRQ_GPIOD] = {
241 .reg_offset = 2,
242 .mask = DA9150_E_GPIOD_MASK,
243 },
244 [DA9150_IRQ_GPADC] = {
245 .reg_offset = 2,
246 .mask = DA9150_E_GPADC_MASK,
247 },
248 [DA9150_IRQ_WKUP] = {
249 .reg_offset = 3,
250 .mask = DA9150_E_WKUP_MASK,
251 },
252};
253
254static struct regmap_irq_chip da9150_regmap_irq_chip = {
255 .name = "da9150_irq",
256 .status_base = DA9150_EVENT_E,
257 .mask_base = DA9150_IRQ_MASK_E,
258 .ack_base = DA9150_EVENT_E,
259 .num_regs = DA9150_NUM_IRQ_REGS,
260 .irqs = da9150_irqs,
261 .num_irqs = ARRAY_SIZE(da9150_irqs),
262};
263
264static struct resource da9150_gpadc_resources[] = {
265 {
266 .name = "GPADC",
267 .start = DA9150_IRQ_GPADC,
268 .end = DA9150_IRQ_GPADC,
269 .flags = IORESOURCE_IRQ,
270 },
271};
272
273static struct resource da9150_charger_resources[] = {
274 {
275 .name = "CHG_STATUS",
276 .start = DA9150_IRQ_CHG,
277 .end = DA9150_IRQ_CHG,
278 .flags = IORESOURCE_IRQ,
279 },
280 {
281 .name = "CHG_TJUNC",
282 .start = DA9150_IRQ_TJUNC,
283 .end = DA9150_IRQ_TJUNC,
284 .flags = IORESOURCE_IRQ,
285 },
286 {
287 .name = "CHG_VFAULT",
288 .start = DA9150_IRQ_VFAULT,
289 .end = DA9150_IRQ_VFAULT,
290 .flags = IORESOURCE_IRQ,
291 },
292 {
293 .name = "CHG_VBUS",
294 .start = DA9150_IRQ_VBUS,
295 .end = DA9150_IRQ_VBUS,
296 .flags = IORESOURCE_IRQ,
297 },
298};
299
300static struct mfd_cell da9150_devs[] = {
301 {
302 .name = "da9150-gpadc",
303 .of_compatible = "dlg,da9150-gpadc",
304 .resources = da9150_gpadc_resources,
305 .num_resources = ARRAY_SIZE(da9150_gpadc_resources),
306 },
307 {
308 .name = "da9150-charger",
309 .of_compatible = "dlg,da9150-charger",
310 .resources = da9150_charger_resources,
311 .num_resources = ARRAY_SIZE(da9150_charger_resources),
312 },
313};
314
315static int da9150_probe(struct i2c_client *client,
316 const struct i2c_device_id *id)
317{
318 struct da9150 *da9150;
319 struct da9150_pdata *pdata = dev_get_platdata(&client->dev);
320 int ret;
321
322 da9150 = devm_kzalloc(&client->dev, sizeof(*da9150), GFP_KERNEL);
323 if (!da9150)
324 return -ENOMEM;
325
326 da9150->dev = &client->dev;
327 da9150->irq = client->irq;
328 i2c_set_clientdata(client, da9150);
329
330 da9150->regmap = devm_regmap_init_i2c(client, &da9150_regmap_config);
331 if (IS_ERR(da9150->regmap)) {
332 ret = PTR_ERR(da9150->regmap);
333 dev_err(da9150->dev, "Failed to allocate register map: %d\n",
334 ret);
335 return ret;
336 }
337
338 da9150->irq_base = pdata ? pdata->irq_base : -1;
339
340 ret = regmap_add_irq_chip(da9150->regmap, da9150->irq,
341 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
342 da9150->irq_base, &da9150_regmap_irq_chip,
343 &da9150->regmap_irq_data);
344 if (ret)
345 return ret;
346
347 da9150->irq_base = regmap_irq_chip_get_base(da9150->regmap_irq_data);
348 enable_irq_wake(da9150->irq);
349
350 ret = mfd_add_devices(da9150->dev, -1, da9150_devs,
351 ARRAY_SIZE(da9150_devs), NULL,
352 da9150->irq_base, NULL);
353 if (ret) {
354 dev_err(da9150->dev, "Failed to add child devices: %d\n", ret);
355 regmap_del_irq_chip(da9150->irq, da9150->regmap_irq_data);
356 return ret;
357 }
358
359 return 0;
360}
361
362static int da9150_remove(struct i2c_client *client)
363{
364 struct da9150 *da9150 = i2c_get_clientdata(client);
365
366 regmap_del_irq_chip(da9150->irq, da9150->regmap_irq_data);
367 mfd_remove_devices(da9150->dev);
368
369 return 0;
370}
371
372static void da9150_shutdown(struct i2c_client *client)
373{
374 struct da9150 *da9150 = i2c_get_clientdata(client);
375
376 /* Make sure we have a wakeup source for the device */
377 da9150_set_bits(da9150, DA9150_CONFIG_D,
378 DA9150_WKUP_PM_EN_MASK,
379 DA9150_WKUP_PM_EN_MASK);
380
381 /* Set device to DISABLED mode */
382 da9150_set_bits(da9150, DA9150_CONTROL_C,
383 DA9150_DISABLE_MASK, DA9150_DISABLE_MASK);
384}
385
386static const struct i2c_device_id da9150_i2c_id[] = {
387 { "da9150", },
388 { }
389};
390MODULE_DEVICE_TABLE(i2c, da9150_i2c_id);
391
392static const struct of_device_id da9150_of_match[] = {
393 { .compatible = "dlg,da9150", },
394 { }
395};
396MODULE_DEVICE_TABLE(of, da9150_of_match);
397
398static struct i2c_driver da9150_driver = {
399 .driver = {
400 .name = "da9150",
401 .of_match_table = of_match_ptr(da9150_of_match),
402 },
403 .probe = da9150_probe,
404 .remove = da9150_remove,
405 .shutdown = da9150_shutdown,
406 .id_table = da9150_i2c_id,
407};
408
409module_i2c_driver(da9150_driver);
410
411MODULE_DESCRIPTION("MFD Core Driver for DA9150");
412MODULE_AUTHOR("Adam Thomson <Adam.Thomson.Opensource@diasemi.com>");
413MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index c835e85539b2..9bbc642a7b9d 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -33,7 +33,7 @@
33 33
34#include <linux/mfd/davinci_voicecodec.h> 34#include <linux/mfd/davinci_voicecodec.h>
35 35
36static struct regmap_config davinci_vc_regmap = { 36static const struct regmap_config davinci_vc_regmap = {
37 .reg_bits = 32, 37 .reg_bits = 32,
38 .val_bits = 32, 38 .val_bits = 32,
39}; 39};
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 16162bf43656..cc1a404328c2 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -675,15 +675,6 @@ bool prcmu_has_arm_maxopp(void)
675} 675}
676 676
677/** 677/**
678 * prcmu_get_boot_status - PRCMU boot status checking
679 * Returns: the current PRCMU boot status
680 */
681int prcmu_get_boot_status(void)
682{
683 return readb(tcdm_base + PRCM_BOOT_STATUS);
684}
685
686/**
687 * prcmu_set_rc_a2p - This function is used to run few power state sequences 678 * prcmu_set_rc_a2p - This function is used to run few power state sequences
688 * @val: Value to be set, i.e. transition requested 679 * @val: Value to be set, i.e. transition requested
689 * Returns: 0 on success, -EINVAL on invalid argument 680 * Returns: 0 on success, -EINVAL on invalid argument
diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
index 6d49685d4ee4..1be9bd1c046d 100644
--- a/drivers/mfd/dln2.c
+++ b/drivers/mfd/dln2.c
@@ -587,12 +587,19 @@ static void dln2_free_rx_urbs(struct dln2_dev *dln2)
587 int i; 587 int i;
588 588
589 for (i = 0; i < DLN2_MAX_URBS; i++) { 589 for (i = 0; i < DLN2_MAX_URBS; i++) {
590 usb_kill_urb(dln2->rx_urb[i]);
591 usb_free_urb(dln2->rx_urb[i]); 590 usb_free_urb(dln2->rx_urb[i]);
592 kfree(dln2->rx_buf[i]); 591 kfree(dln2->rx_buf[i]);
593 } 592 }
594} 593}
595 594
595static void dln2_stop_rx_urbs(struct dln2_dev *dln2)
596{
597 int i;
598
599 for (i = 0; i < DLN2_MAX_URBS; i++)
600 usb_kill_urb(dln2->rx_urb[i]);
601}
602
596static void dln2_free(struct dln2_dev *dln2) 603static void dln2_free(struct dln2_dev *dln2)
597{ 604{
598 dln2_free_rx_urbs(dln2); 605 dln2_free_rx_urbs(dln2);
@@ -604,9 +611,7 @@ static int dln2_setup_rx_urbs(struct dln2_dev *dln2,
604 struct usb_host_interface *hostif) 611 struct usb_host_interface *hostif)
605{ 612{
606 int i; 613 int i;
607 int ret;
608 const int rx_max_size = DLN2_RX_BUF_SIZE; 614 const int rx_max_size = DLN2_RX_BUF_SIZE;
609 struct device *dev = &dln2->interface->dev;
610 615
611 for (i = 0; i < DLN2_MAX_URBS; i++) { 616 for (i = 0; i < DLN2_MAX_URBS; i++) {
612 dln2->rx_buf[i] = kmalloc(rx_max_size, GFP_KERNEL); 617 dln2->rx_buf[i] = kmalloc(rx_max_size, GFP_KERNEL);
@@ -620,8 +625,19 @@ static int dln2_setup_rx_urbs(struct dln2_dev *dln2,
620 usb_fill_bulk_urb(dln2->rx_urb[i], dln2->usb_dev, 625 usb_fill_bulk_urb(dln2->rx_urb[i], dln2->usb_dev,
621 usb_rcvbulkpipe(dln2->usb_dev, dln2->ep_in), 626 usb_rcvbulkpipe(dln2->usb_dev, dln2->ep_in),
622 dln2->rx_buf[i], rx_max_size, dln2_rx, dln2); 627 dln2->rx_buf[i], rx_max_size, dln2_rx, dln2);
628 }
623 629
624 ret = usb_submit_urb(dln2->rx_urb[i], GFP_KERNEL); 630 return 0;
631}
632
633static int dln2_start_rx_urbs(struct dln2_dev *dln2, gfp_t gfp)
634{
635 struct device *dev = &dln2->interface->dev;
636 int ret;
637 int i;
638
639 for (i = 0; i < DLN2_MAX_URBS; i++) {
640 ret = usb_submit_urb(dln2->rx_urb[i], gfp);
625 if (ret < 0) { 641 if (ret < 0) {
626 dev_err(dev, "failed to submit RX URB: %d\n", ret); 642 dev_err(dev, "failed to submit RX URB: %d\n", ret);
627 return ret; 643 return ret;
@@ -665,9 +681,8 @@ static const struct mfd_cell dln2_devs[] = {
665 }, 681 },
666}; 682};
667 683
668static void dln2_disconnect(struct usb_interface *interface) 684static void dln2_stop(struct dln2_dev *dln2)
669{ 685{
670 struct dln2_dev *dln2 = usb_get_intfdata(interface);
671 int i, j; 686 int i, j;
672 687
673 /* don't allow starting new transfers */ 688 /* don't allow starting new transfers */
@@ -696,6 +711,15 @@ static void dln2_disconnect(struct usb_interface *interface)
696 /* wait for transfers to end */ 711 /* wait for transfers to end */
697 wait_event(dln2->disconnect_wq, !dln2->active_transfers); 712 wait_event(dln2->disconnect_wq, !dln2->active_transfers);
698 713
714 dln2_stop_rx_urbs(dln2);
715}
716
717static void dln2_disconnect(struct usb_interface *interface)
718{
719 struct dln2_dev *dln2 = usb_get_intfdata(interface);
720
721 dln2_stop(dln2);
722
699 mfd_remove_devices(&interface->dev); 723 mfd_remove_devices(&interface->dev);
700 724
701 dln2_free(dln2); 725 dln2_free(dln2);
@@ -738,28 +762,53 @@ static int dln2_probe(struct usb_interface *interface,
738 762
739 ret = dln2_setup_rx_urbs(dln2, hostif); 763 ret = dln2_setup_rx_urbs(dln2, hostif);
740 if (ret) 764 if (ret)
741 goto out_cleanup; 765 goto out_free;
766
767 ret = dln2_start_rx_urbs(dln2, GFP_KERNEL);
768 if (ret)
769 goto out_stop_rx;
742 770
743 ret = dln2_hw_init(dln2); 771 ret = dln2_hw_init(dln2);
744 if (ret < 0) { 772 if (ret < 0) {
745 dev_err(dev, "failed to initialize hardware\n"); 773 dev_err(dev, "failed to initialize hardware\n");
746 goto out_cleanup; 774 goto out_stop_rx;
747 } 775 }
748 776
749 ret = mfd_add_hotplug_devices(dev, dln2_devs, ARRAY_SIZE(dln2_devs)); 777 ret = mfd_add_hotplug_devices(dev, dln2_devs, ARRAY_SIZE(dln2_devs));
750 if (ret != 0) { 778 if (ret != 0) {
751 dev_err(dev, "failed to add mfd devices to core\n"); 779 dev_err(dev, "failed to add mfd devices to core\n");
752 goto out_cleanup; 780 goto out_stop_rx;
753 } 781 }
754 782
755 return 0; 783 return 0;
756 784
757out_cleanup: 785out_stop_rx:
786 dln2_stop_rx_urbs(dln2);
787
788out_free:
758 dln2_free(dln2); 789 dln2_free(dln2);
759 790
760 return ret; 791 return ret;
761} 792}
762 793
794static int dln2_suspend(struct usb_interface *iface, pm_message_t message)
795{
796 struct dln2_dev *dln2 = usb_get_intfdata(iface);
797
798 dln2_stop(dln2);
799
800 return 0;
801}
802
803static int dln2_resume(struct usb_interface *iface)
804{
805 struct dln2_dev *dln2 = usb_get_intfdata(iface);
806
807 dln2->disconnect = false;
808
809 return dln2_start_rx_urbs(dln2, GFP_NOIO);
810}
811
763static const struct usb_device_id dln2_table[] = { 812static const struct usb_device_id dln2_table[] = {
764 { USB_DEVICE(0xa257, 0x2013) }, 813 { USB_DEVICE(0xa257, 0x2013) },
765 { } 814 { }
@@ -772,6 +821,8 @@ static struct usb_driver dln2_driver = {
772 .probe = dln2_probe, 821 .probe = dln2_probe,
773 .disconnect = dln2_disconnect, 822 .disconnect = dln2_disconnect,
774 .id_table = dln2_table, 823 .id_table = dln2_table,
824 .suspend = dln2_suspend,
825 .resume = dln2_resume,
775}; 826};
776 827
777module_usb_driver(dln2_driver); 828module_usb_driver(dln2_driver);
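
The dln2 rework above factors URB allocation (dln2_setup_rx_urbs) apart from submission (dln2_start_rx_urbs), so one start/stop pair serves probe/disconnect as well as the new suspend/resume callbacks; note the GFP_NOIO on resume, since the PM path must not recurse into block I/O. A condensed sketch of that suspend/resume shape for a generic USB driver, with hypothetical names:

	#include <linux/usb.h>

	struct example_dev {
		struct urb *rx_urb;
	};

	static int example_suspend(struct usb_interface *intf, pm_message_t msg)
	{
		struct example_dev *dev = usb_get_intfdata(intf);

		/* Synchronously stop the in-flight receive URB; it is
		 * resubmitted on resume. */
		usb_kill_urb(dev->rx_urb);

		return 0;
	}

	static int example_resume(struct usb_interface *intf)
	{
		struct example_dev *dev = usb_get_intfdata(intf);

		/* GFP_NOIO: resume may run while block I/O is frozen. */
		return usb_submit_urb(dev->rx_urb, GFP_NOIO);
	}
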
diff --git a/drivers/mfd/hi6421-pmic-core.c b/drivers/mfd/hi6421-pmic-core.c
index 321a2656fd00..7210ae28bf81 100644
--- a/drivers/mfd/hi6421-pmic-core.c
+++ b/drivers/mfd/hi6421-pmic-core.c
@@ -35,7 +35,7 @@ static const struct mfd_cell hi6421_devs[] = {
35 { .name = "hi6421-regulator", }, 35 { .name = "hi6421-regulator", },
36}; 36};
37 37
38static struct regmap_config hi6421_regmap_config = { 38static const struct regmap_config hi6421_regmap_config = {
39 .reg_bits = 32, 39 .reg_bits = 32,
40 .reg_stride = 4, 40 .reg_stride = 4,
41 .val_bits = 8, 41 .val_bits = 8,
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index df7b0642a5b4..80cef048b904 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -64,6 +64,9 @@ static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c,
64 config = (struct intel_soc_pmic_config *)id->driver_data; 64 config = (struct intel_soc_pmic_config *)id->driver_data;
65 65
66 pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL); 66 pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
67 if (!pmic)
68 return -ENOMEM;
69
67 dev_set_drvdata(dev, pmic); 70 dev_set_drvdata(dev, pmic);
68 71
69 pmic->regmap = devm_regmap_init_i2c(i2c, config->regmap_config); 72 pmic->regmap = devm_regmap_init_i2c(i2c, config->regmap_config);
diff --git a/drivers/mfd/intel_soc_pmic_core.h b/drivers/mfd/intel_soc_pmic_core.h
index 33aacd9baddc..9498d6719847 100644
--- a/drivers/mfd/intel_soc_pmic_core.h
+++ b/drivers/mfd/intel_soc_pmic_core.h
@@ -23,7 +23,7 @@ struct intel_soc_pmic_config {
23 unsigned long irq_flags; 23 unsigned long irq_flags;
24 struct mfd_cell *cell_dev; 24 struct mfd_cell *cell_dev;
25 int n_cell_devs; 25 int n_cell_devs;
26 struct regmap_config *regmap_config; 26 const struct regmap_config *regmap_config;
27 struct regmap_irq_chip *irq_chip; 27 struct regmap_irq_chip *irq_chip;
28}; 28};
29 29
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
index c85e2ecb868a..4cc1b324e971 100644
--- a/drivers/mfd/intel_soc_pmic_crc.c
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -111,7 +111,7 @@ static struct mfd_cell crystal_cove_dev[] = {
111 }, 111 },
112}; 112};
113 113
114static struct regmap_config crystal_cove_regmap_config = { 114static const struct regmap_config crystal_cove_regmap_config = {
115 .reg_bits = 8, 115 .reg_bits = 8,
116 .val_bits = 8, 116 .val_bits = 8,
117 117
diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c
index 8c29f7b27324..d42fbb667d8c 100644
--- a/drivers/mfd/lm3533-core.c
+++ b/drivers/mfd/lm3533-core.c
@@ -583,7 +583,7 @@ static bool lm3533_precious_register(struct device *dev, unsigned int reg)
583 } 583 }
584} 584}
585 585
586static struct regmap_config regmap_config = { 586static const struct regmap_config regmap_config = {
587 .reg_bits = 8, 587 .reg_bits = 8,
588 .val_bits = 8, 588 .val_bits = 8,
589 .max_register = LM3533_REG_MAX, 589 .max_register = LM3533_REG_MAX,
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index 5c38df35a84d..a56e4ba5227b 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -75,6 +75,7 @@ static struct lpc_sch_info sch_chipset_info[] = {
75 [LPC_QUARK_X1000] = { 75 [LPC_QUARK_X1000] = {
76 .io_size_gpio = GPIO_IO_SIZE, 76 .io_size_gpio = GPIO_IO_SIZE,
77 .irq_gpio = GPIO_IRQ_QUARK_X1000, 77 .irq_gpio = GPIO_IRQ_QUARK_X1000,
78 .io_size_wdt = WDT_IO_SIZE,
78 }, 79 },
79}; 80};
80 81
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index 929795eae9fc..760d08d7923d 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -111,17 +111,17 @@ static bool max77802_is_volatile_reg(struct device *dev, unsigned int reg)
111 max77802_rtc_is_volatile_reg(dev, reg)); 111 max77802_rtc_is_volatile_reg(dev, reg));
112} 112}
113 113
114static struct regmap_config max77686_regmap_config = { 114static const struct regmap_config max77686_regmap_config = {
115 .reg_bits = 8, 115 .reg_bits = 8,
116 .val_bits = 8, 116 .val_bits = 8,
117}; 117};
118 118
119static struct regmap_config max77686_rtc_regmap_config = { 119static const struct regmap_config max77686_rtc_regmap_config = {
120 .reg_bits = 8, 120 .reg_bits = 8,
121 .val_bits = 8, 121 .val_bits = 8,
122}; 122};
123 123
124static struct regmap_config max77802_regmap_config = { 124static const struct regmap_config max77802_regmap_config = {
125 .reg_bits = 8, 125 .reg_bits = 8,
126 .val_bits = 8, 126 .val_bits = 8,
127 .writeable_reg = max77802_is_accessible_reg, 127 .writeable_reg = max77802_is_accessible_reg,
@@ -205,24 +205,10 @@ static const struct of_device_id max77686_pmic_dt_match[] = {
205 { }, 205 { },
206}; 206};
207 207
208static struct max77686_platform_data *max77686_i2c_parse_dt_pdata(struct device
209 *dev)
210{
211 struct max77686_platform_data *pd;
212
213 pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
214 if (!pd)
215 return NULL;
216
217 dev->platform_data = pd;
218 return pd;
219}
220
221static int max77686_i2c_probe(struct i2c_client *i2c, 208static int max77686_i2c_probe(struct i2c_client *i2c,
222 const struct i2c_device_id *id) 209 const struct i2c_device_id *id)
223{ 210{
224 struct max77686_dev *max77686 = NULL; 211 struct max77686_dev *max77686 = NULL;
225 struct max77686_platform_data *pdata = dev_get_platdata(&i2c->dev);
226 const struct of_device_id *match; 212 const struct of_device_id *match;
227 unsigned int data; 213 unsigned int data;
228 int ret = 0; 214 int ret = 0;
@@ -233,14 +219,6 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
233 const struct mfd_cell *cells; 219 const struct mfd_cell *cells;
234 int n_devs; 220 int n_devs;
235 221
236 if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node && !pdata)
237 pdata = max77686_i2c_parse_dt_pdata(&i2c->dev);
238
239 if (!pdata) {
240 dev_err(&i2c->dev, "No platform data found.\n");
241 return -EINVAL;
242 }
243
244 max77686 = devm_kzalloc(&i2c->dev, 222 max77686 = devm_kzalloc(&i2c->dev,
245 sizeof(struct max77686_dev), GFP_KERNEL); 223 sizeof(struct max77686_dev), GFP_KERNEL);
246 if (!max77686) 224 if (!max77686)
@@ -259,7 +237,6 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
259 max77686->dev = &i2c->dev; 237 max77686->dev = &i2c->dev;
260 max77686->i2c = i2c; 238 max77686->i2c = i2c;
261 239
262 max77686->wakeup = pdata->wakeup;
263 max77686->irq = i2c->irq; 240 max77686->irq = i2c->irq;
264 241
265 if (max77686->type == TYPE_MAX77686) { 242 if (max77686->type == TYPE_MAX77686) {
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
index ae3addb153a2..68b844811566 100644
--- a/drivers/mfd/mc13xxx-i2c.c
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -46,7 +46,7 @@ static const struct of_device_id mc13xxx_dt_ids[] = {
46}; 46};
47MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids); 47MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
48 48
49static struct regmap_config mc13xxx_regmap_i2c_config = { 49static const struct regmap_config mc13xxx_regmap_i2c_config = {
50 .reg_bits = 8, 50 .reg_bits = 8,
51 .val_bits = 24, 51 .val_bits = 24,
52 52
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
index 702925e242c9..58a170e45d88 100644
--- a/drivers/mfd/mc13xxx-spi.c
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -48,7 +48,7 @@ static const struct of_device_id mc13xxx_dt_ids[] = {
48}; 48};
49MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids); 49MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
50 50
51static struct regmap_config mc13xxx_regmap_spi_config = { 51static const struct regmap_config mc13xxx_regmap_spi_config = {
52 .reg_bits = 7, 52 .reg_bits = 7,
53 .pad_bits = 1, 53 .pad_bits = 1,
54 .val_bits = 24, 54 .val_bits = 24,
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 04cd54dd507c..1d924d1533c0 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -129,16 +129,6 @@ static inline u32 usbhs_read(void __iomem *base, u32 reg)
129 return readl_relaxed(base + reg); 129 return readl_relaxed(base + reg);
130} 130}
131 131
132static inline void usbhs_writeb(void __iomem *base, u8 reg, u8 val)
133{
134 writeb_relaxed(val, base + reg);
135}
136
137static inline u8 usbhs_readb(void __iomem *base, u8 reg)
138{
139 return readb_relaxed(base + reg);
140}
141
142/*-------------------------------------------------------------------------*/ 132/*-------------------------------------------------------------------------*/
143 133
144/** 134/**
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 43664eb69c93..6155d123a84e 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -183,7 +183,7 @@ static int pcf50633_resume(struct device *dev)
183 183
184static SIMPLE_DEV_PM_OPS(pcf50633_pm, pcf50633_suspend, pcf50633_resume); 184static SIMPLE_DEV_PM_OPS(pcf50633_pm, pcf50633_suspend, pcf50633_resume);
185 185
186static struct regmap_config pcf50633_regmap_config = { 186static const struct regmap_config pcf50633_regmap_config = {
187 .reg_bits = 8, 187 .reg_bits = 8,
188 .val_bits = 8, 188 .val_bits = 8,
189}; 189};
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
new file mode 100644
index 000000000000..f696328c2933
--- /dev/null
+++ b/drivers/mfd/qcom_rpm.c
@@ -0,0 +1,581 @@
1/*
2 * Copyright (c) 2014, Sony Mobile Communications AB.
3 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
4 * Author: Bjorn Andersson <bjorn.andersson@sonymobile.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/of_platform.h>
19#include <linux/io.h>
20#include <linux/interrupt.h>
21#include <linux/mfd/qcom_rpm.h>
22#include <linux/mfd/syscon.h>
23#include <linux/regmap.h>
24
25#include <dt-bindings/mfd/qcom-rpm.h>
26
27struct qcom_rpm_resource {
28 unsigned target_id;
29 unsigned status_id;
30 unsigned select_id;
31 unsigned size;
32};
33
34struct qcom_rpm_data {
35 u32 version;
36 const struct qcom_rpm_resource *resource_table;
37 unsigned n_resources;
38};
39
40struct qcom_rpm {
41 struct device *dev;
42 struct regmap *ipc_regmap;
43 unsigned ipc_offset;
44 unsigned ipc_bit;
45
46 struct completion ack;
47 struct mutex lock;
48
49 void __iomem *status_regs;
50 void __iomem *ctrl_regs;
51 void __iomem *req_regs;
52
53 u32 ack_status;
54
55 const struct qcom_rpm_data *data;
56};
57
58#define RPM_STATUS_REG(rpm, i) ((rpm)->status_regs + (i) * 4)
59#define RPM_CTRL_REG(rpm, i) ((rpm)->ctrl_regs + (i) * 4)
60#define RPM_REQ_REG(rpm, i) ((rpm)->req_regs + (i) * 4)
61
62#define RPM_REQUEST_TIMEOUT (5 * HZ)
63
64#define RPM_REQUEST_CONTEXT 3
65#define RPM_REQ_SELECT 11
66#define RPM_ACK_CONTEXT 15
67#define RPM_ACK_SELECTOR 23
68#define RPM_SELECT_SIZE 7
69
70#define RPM_NOTIFICATION BIT(30)
71#define RPM_REJECTED BIT(31)
72
73#define RPM_SIGNAL BIT(2)
74
75static const struct qcom_rpm_resource apq8064_rpm_resource_table[] = {
76 [QCOM_RPM_CXO_CLK] = { 25, 9, 5, 1 },
77 [QCOM_RPM_PXO_CLK] = { 26, 10, 6, 1 },
78 [QCOM_RPM_APPS_FABRIC_CLK] = { 27, 11, 8, 1 },
79 [QCOM_RPM_SYS_FABRIC_CLK] = { 28, 12, 9, 1 },
80 [QCOM_RPM_MM_FABRIC_CLK] = { 29, 13, 10, 1 },
81 [QCOM_RPM_DAYTONA_FABRIC_CLK] = { 30, 14, 11, 1 },
82 [QCOM_RPM_SFPB_CLK] = { 31, 15, 12, 1 },
83 [QCOM_RPM_CFPB_CLK] = { 32, 16, 13, 1 },
84 [QCOM_RPM_MMFPB_CLK] = { 33, 17, 14, 1 },
85 [QCOM_RPM_EBI1_CLK] = { 34, 18, 16, 1 },
86 [QCOM_RPM_APPS_FABRIC_HALT] = { 35, 19, 18, 1 },
87 [QCOM_RPM_APPS_FABRIC_MODE] = { 37, 20, 19, 1 },
88 [QCOM_RPM_APPS_FABRIC_IOCTL] = { 40, 21, 20, 1 },
89 [QCOM_RPM_APPS_FABRIC_ARB] = { 41, 22, 21, 12 },
90 [QCOM_RPM_SYS_FABRIC_HALT] = { 53, 23, 22, 1 },
91 [QCOM_RPM_SYS_FABRIC_MODE] = { 55, 24, 23, 1 },
92 [QCOM_RPM_SYS_FABRIC_IOCTL] = { 58, 25, 24, 1 },
93 [QCOM_RPM_SYS_FABRIC_ARB] = { 59, 26, 25, 30 },
94 [QCOM_RPM_MM_FABRIC_HALT] = { 89, 27, 26, 1 },
95 [QCOM_RPM_MM_FABRIC_MODE] = { 91, 28, 27, 1 },
96 [QCOM_RPM_MM_FABRIC_IOCTL] = { 94, 29, 28, 1 },
97 [QCOM_RPM_MM_FABRIC_ARB] = { 95, 30, 29, 21 },
98 [QCOM_RPM_PM8921_SMPS1] = { 116, 31, 30, 2 },
99 [QCOM_RPM_PM8921_SMPS2] = { 118, 33, 31, 2 },
100 [QCOM_RPM_PM8921_SMPS3] = { 120, 35, 32, 2 },
101 [QCOM_RPM_PM8921_SMPS4] = { 122, 37, 33, 2 },
102 [QCOM_RPM_PM8921_SMPS5] = { 124, 39, 34, 2 },
103 [QCOM_RPM_PM8921_SMPS6] = { 126, 41, 35, 2 },
104 [QCOM_RPM_PM8921_SMPS7] = { 128, 43, 36, 2 },
105 [QCOM_RPM_PM8921_SMPS8] = { 130, 45, 37, 2 },
106 [QCOM_RPM_PM8921_LDO1] = { 132, 47, 38, 2 },
107 [QCOM_RPM_PM8921_LDO2] = { 134, 49, 39, 2 },
108 [QCOM_RPM_PM8921_LDO3] = { 136, 51, 40, 2 },
109 [QCOM_RPM_PM8921_LDO4] = { 138, 53, 41, 2 },
110 [QCOM_RPM_PM8921_LDO5] = { 140, 55, 42, 2 },
111 [QCOM_RPM_PM8921_LDO6] = { 142, 57, 43, 2 },
112 [QCOM_RPM_PM8921_LDO7] = { 144, 59, 44, 2 },
113 [QCOM_RPM_PM8921_LDO8] = { 146, 61, 45, 2 },
114 [QCOM_RPM_PM8921_LDO9] = { 148, 63, 46, 2 },
115 [QCOM_RPM_PM8921_LDO10] = { 150, 65, 47, 2 },
116 [QCOM_RPM_PM8921_LDO11] = { 152, 67, 48, 2 },
117 [QCOM_RPM_PM8921_LDO12] = { 154, 69, 49, 2 },
118 [QCOM_RPM_PM8921_LDO13] = { 156, 71, 50, 2 },
119 [QCOM_RPM_PM8921_LDO14] = { 158, 73, 51, 2 },
120 [QCOM_RPM_PM8921_LDO15] = { 160, 75, 52, 2 },
121 [QCOM_RPM_PM8921_LDO16] = { 162, 77, 53, 2 },
122 [QCOM_RPM_PM8921_LDO17] = { 164, 79, 54, 2 },
123 [QCOM_RPM_PM8921_LDO18] = { 166, 81, 55, 2 },
124 [QCOM_RPM_PM8921_LDO19] = { 168, 83, 56, 2 },
125 [QCOM_RPM_PM8921_LDO20] = { 170, 85, 57, 2 },
126 [QCOM_RPM_PM8921_LDO21] = { 172, 87, 58, 2 },
127 [QCOM_RPM_PM8921_LDO22] = { 174, 89, 59, 2 },
128 [QCOM_RPM_PM8921_LDO23] = { 176, 91, 60, 2 },
129 [QCOM_RPM_PM8921_LDO24] = { 178, 93, 61, 2 },
130 [QCOM_RPM_PM8921_LDO25] = { 180, 95, 62, 2 },
131 [QCOM_RPM_PM8921_LDO26] = { 182, 97, 63, 2 },
132 [QCOM_RPM_PM8921_LDO27] = { 184, 99, 64, 2 },
133 [QCOM_RPM_PM8921_LDO28] = { 186, 101, 65, 2 },
134 [QCOM_RPM_PM8921_LDO29] = { 188, 103, 66, 2 },
135 [QCOM_RPM_PM8921_CLK1] = { 190, 105, 67, 2 },
136 [QCOM_RPM_PM8921_CLK2] = { 192, 107, 68, 2 },
137 [QCOM_RPM_PM8921_LVS1] = { 194, 109, 69, 1 },
138 [QCOM_RPM_PM8921_LVS2] = { 195, 110, 70, 1 },
139 [QCOM_RPM_PM8921_LVS3] = { 196, 111, 71, 1 },
140 [QCOM_RPM_PM8921_LVS4] = { 197, 112, 72, 1 },
141 [QCOM_RPM_PM8921_LVS5] = { 198, 113, 73, 1 },
142 [QCOM_RPM_PM8921_LVS6] = { 199, 114, 74, 1 },
143 [QCOM_RPM_PM8921_LVS7] = { 200, 115, 75, 1 },
144 [QCOM_RPM_PM8821_SMPS1] = { 201, 116, 76, 2 },
145 [QCOM_RPM_PM8821_SMPS2] = { 203, 118, 77, 2 },
146 [QCOM_RPM_PM8821_LDO1] = { 205, 120, 78, 2 },
147 [QCOM_RPM_PM8921_NCP] = { 207, 122, 80, 2 },
148 [QCOM_RPM_CXO_BUFFERS] = { 209, 124, 81, 1 },
149 [QCOM_RPM_USB_OTG_SWITCH] = { 210, 125, 82, 1 },
150 [QCOM_RPM_HDMI_SWITCH] = { 211, 126, 83, 1 },
151 [QCOM_RPM_DDR_DMM] = { 212, 127, 84, 2 },
152 [QCOM_RPM_VDDMIN_GPIO] = { 215, 131, 89, 1 },
153};
154
155static const struct qcom_rpm_data apq8064_template = {
156 .version = 3,
157 .resource_table = apq8064_rpm_resource_table,
158 .n_resources = ARRAY_SIZE(apq8064_rpm_resource_table),
159};
160
161static const struct qcom_rpm_resource msm8660_rpm_resource_table[] = {
162 [QCOM_RPM_CXO_CLK] = { 32, 12, 5, 1 },
163 [QCOM_RPM_PXO_CLK] = { 33, 13, 6, 1 },
164 [QCOM_RPM_PLL_4] = { 34, 14, 7, 1 },
165 [QCOM_RPM_APPS_FABRIC_CLK] = { 35, 15, 8, 1 },
166 [QCOM_RPM_SYS_FABRIC_CLK] = { 36, 16, 9, 1 },
167 [QCOM_RPM_MM_FABRIC_CLK] = { 37, 17, 10, 1 },
168 [QCOM_RPM_DAYTONA_FABRIC_CLK] = { 38, 18, 11, 1 },
169 [QCOM_RPM_SFPB_CLK] = { 39, 19, 12, 1 },
170 [QCOM_RPM_CFPB_CLK] = { 40, 20, 13, 1 },
171 [QCOM_RPM_MMFPB_CLK] = { 41, 21, 14, 1 },
172 [QCOM_RPM_SMI_CLK] = { 42, 22, 15, 1 },
173 [QCOM_RPM_EBI1_CLK] = { 43, 23, 16, 1 },
174 [QCOM_RPM_APPS_L2_CACHE_CTL] = { 44, 24, 17, 1 },
175 [QCOM_RPM_APPS_FABRIC_HALT] = { 45, 25, 18, 2 },
176 [QCOM_RPM_APPS_FABRIC_MODE] = { 47, 26, 19, 3 },
177 [QCOM_RPM_APPS_FABRIC_ARB] = { 51, 28, 21, 6 },
178 [QCOM_RPM_SYS_FABRIC_HALT] = { 63, 29, 22, 2 },
179 [QCOM_RPM_SYS_FABRIC_MODE] = { 65, 30, 23, 3 },
180 [QCOM_RPM_SYS_FABRIC_ARB] = { 69, 32, 25, 22 },
181 [QCOM_RPM_MM_FABRIC_HALT] = { 105, 33, 26, 2 },
182 [QCOM_RPM_MM_FABRIC_MODE] = { 107, 34, 27, 3 },
183 [QCOM_RPM_MM_FABRIC_ARB] = { 111, 36, 29, 23 },
184 [QCOM_RPM_PM8901_SMPS0] = { 134, 37, 30, 2 },
185 [QCOM_RPM_PM8901_SMPS1] = { 136, 39, 31, 2 },
186 [QCOM_RPM_PM8901_SMPS2] = { 138, 41, 32, 2 },
187 [QCOM_RPM_PM8901_SMPS3] = { 140, 43, 33, 2 },
188 [QCOM_RPM_PM8901_SMPS4] = { 142, 45, 34, 2 },
189 [QCOM_RPM_PM8901_LDO0] = { 144, 47, 35, 2 },
190 [QCOM_RPM_PM8901_LDO1] = { 146, 49, 36, 2 },
191 [QCOM_RPM_PM8901_LDO2] = { 148, 51, 37, 2 },
192 [QCOM_RPM_PM8901_LDO3] = { 150, 53, 38, 2 },
193 [QCOM_RPM_PM8901_LDO4] = { 152, 55, 39, 2 },
194 [QCOM_RPM_PM8901_LDO5] = { 154, 57, 40, 2 },
195 [QCOM_RPM_PM8901_LDO6] = { 156, 59, 41, 2 },
196 [QCOM_RPM_PM8901_LVS0] = { 158, 61, 42, 1 },
197 [QCOM_RPM_PM8901_LVS1] = { 159, 62, 43, 1 },
198 [QCOM_RPM_PM8901_LVS2] = { 160, 63, 44, 1 },
199 [QCOM_RPM_PM8901_LVS3] = { 161, 64, 45, 1 },
200 [QCOM_RPM_PM8901_MVS] = { 162, 65, 46, 1 },
201 [QCOM_RPM_PM8058_SMPS0] = { 163, 66, 47, 2 },
202 [QCOM_RPM_PM8058_SMPS1] = { 165, 68, 48, 2 },
203 [QCOM_RPM_PM8058_SMPS2] = { 167, 70, 49, 2 },
204 [QCOM_RPM_PM8058_SMPS3] = { 169, 72, 50, 2 },
205 [QCOM_RPM_PM8058_SMPS4] = { 171, 74, 51, 2 },
206 [QCOM_RPM_PM8058_LDO0] = { 173, 76, 52, 2 },
207 [QCOM_RPM_PM8058_LDO1] = { 175, 78, 53, 2 },
208 [QCOM_RPM_PM8058_LDO2] = { 177, 80, 54, 2 },
209 [QCOM_RPM_PM8058_LDO3] = { 179, 82, 55, 2 },
210 [QCOM_RPM_PM8058_LDO4] = { 181, 84, 56, 2 },
211 [QCOM_RPM_PM8058_LDO5] = { 183, 86, 57, 2 },
212 [QCOM_RPM_PM8058_LDO6] = { 185, 88, 58, 2 },
213 [QCOM_RPM_PM8058_LDO7] = { 187, 90, 59, 2 },
214 [QCOM_RPM_PM8058_LDO8] = { 189, 92, 60, 2 },
215 [QCOM_RPM_PM8058_LDO9] = { 191, 94, 61, 2 },
216 [QCOM_RPM_PM8058_LDO10] = { 193, 96, 62, 2 },
217 [QCOM_RPM_PM8058_LDO11] = { 195, 98, 63, 2 },
218 [QCOM_RPM_PM8058_LDO12] = { 197, 100, 64, 2 },
219 [QCOM_RPM_PM8058_LDO13] = { 199, 102, 65, 2 },
220 [QCOM_RPM_PM8058_LDO14] = { 201, 104, 66, 2 },
221 [QCOM_RPM_PM8058_LDO15] = { 203, 106, 67, 2 },
222 [QCOM_RPM_PM8058_LDO16] = { 205, 108, 68, 2 },
223 [QCOM_RPM_PM8058_LDO17] = { 207, 110, 69, 2 },
224 [QCOM_RPM_PM8058_LDO18] = { 209, 112, 70, 2 },
225 [QCOM_RPM_PM8058_LDO19] = { 211, 114, 71, 2 },
226 [QCOM_RPM_PM8058_LDO20] = { 213, 116, 72, 2 },
227 [QCOM_RPM_PM8058_LDO21] = { 215, 118, 73, 2 },
228 [QCOM_RPM_PM8058_LDO22] = { 217, 120, 74, 2 },
229 [QCOM_RPM_PM8058_LDO23] = { 219, 122, 75, 2 },
230 [QCOM_RPM_PM8058_LDO24] = { 221, 124, 76, 2 },
231 [QCOM_RPM_PM8058_LDO25] = { 223, 126, 77, 2 },
232 [QCOM_RPM_PM8058_LVS0] = { 225, 128, 78, 1 },
233 [QCOM_RPM_PM8058_LVS1] = { 226, 129, 79, 1 },
234 [QCOM_RPM_PM8058_NCP] = { 227, 130, 80, 2 },
235 [QCOM_RPM_CXO_BUFFERS] = { 229, 132, 81, 1 },
236};
237
238static const struct qcom_rpm_data msm8660_template = {
239 .version = 2,
240 .resource_table = msm8660_rpm_resource_table,
241 .n_resources = ARRAY_SIZE(msm8660_rpm_resource_table),
242};
243
244static const struct qcom_rpm_resource msm8960_rpm_resource_table[] = {
245 [QCOM_RPM_CXO_CLK] = { 25, 9, 5, 1 },
246 [QCOM_RPM_PXO_CLK] = { 26, 10, 6, 1 },
247 [QCOM_RPM_APPS_FABRIC_CLK] = { 27, 11, 8, 1 },
248 [QCOM_RPM_SYS_FABRIC_CLK] = { 28, 12, 9, 1 },
249 [QCOM_RPM_MM_FABRIC_CLK] = { 29, 13, 10, 1 },
250 [QCOM_RPM_DAYTONA_FABRIC_CLK] = { 30, 14, 11, 1 },
251 [QCOM_RPM_SFPB_CLK] = { 31, 15, 12, 1 },
252 [QCOM_RPM_CFPB_CLK] = { 32, 16, 13, 1 },
253 [QCOM_RPM_MMFPB_CLK] = { 33, 17, 14, 1 },
254 [QCOM_RPM_EBI1_CLK] = { 34, 18, 16, 1 },
255 [QCOM_RPM_APPS_FABRIC_HALT] = { 35, 19, 18, 1 },
256 [QCOM_RPM_APPS_FABRIC_MODE] = { 37, 20, 19, 1 },
257 [QCOM_RPM_APPS_FABRIC_IOCTL] = { 40, 21, 20, 1 },
258 [QCOM_RPM_APPS_FABRIC_ARB] = { 41, 22, 21, 12 },
259 [QCOM_RPM_SYS_FABRIC_HALT] = { 53, 23, 22, 1 },
260 [QCOM_RPM_SYS_FABRIC_MODE] = { 55, 24, 23, 1 },
261 [QCOM_RPM_SYS_FABRIC_IOCTL] = { 58, 25, 24, 1 },
262 [QCOM_RPM_SYS_FABRIC_ARB] = { 59, 26, 25, 29 },
263 [QCOM_RPM_MM_FABRIC_HALT] = { 88, 27, 26, 1 },
264 [QCOM_RPM_MM_FABRIC_MODE] = { 90, 28, 27, 1 },
265 [QCOM_RPM_MM_FABRIC_IOCTL] = { 93, 29, 28, 1 },
266 [QCOM_RPM_MM_FABRIC_ARB] = { 94, 30, 29, 23 },
267 [QCOM_RPM_PM8921_SMPS1] = { 117, 31, 30, 2 },
268 [QCOM_RPM_PM8921_SMPS2] = { 119, 33, 31, 2 },
269 [QCOM_RPM_PM8921_SMPS3] = { 121, 35, 32, 2 },
270 [QCOM_RPM_PM8921_SMPS4] = { 123, 37, 33, 2 },
271 [QCOM_RPM_PM8921_SMPS5] = { 125, 39, 34, 2 },
272 [QCOM_RPM_PM8921_SMPS6] = { 127, 41, 35, 2 },
273 [QCOM_RPM_PM8921_SMPS7] = { 129, 43, 36, 2 },
274 [QCOM_RPM_PM8921_SMPS8] = { 131, 45, 37, 2 },
275 [QCOM_RPM_PM8921_LDO1] = { 133, 47, 38, 2 },
276 [QCOM_RPM_PM8921_LDO2] = { 135, 49, 39, 2 },
277 [QCOM_RPM_PM8921_LDO3] = { 137, 51, 40, 2 },
278 [QCOM_RPM_PM8921_LDO4] = { 139, 53, 41, 2 },
279 [QCOM_RPM_PM8921_LDO5] = { 141, 55, 42, 2 },
280 [QCOM_RPM_PM8921_LDO6] = { 143, 57, 43, 2 },
281 [QCOM_RPM_PM8921_LDO7] = { 145, 59, 44, 2 },
282 [QCOM_RPM_PM8921_LDO8] = { 147, 61, 45, 2 },
283 [QCOM_RPM_PM8921_LDO9] = { 149, 63, 46, 2 },
284 [QCOM_RPM_PM8921_LDO10] = { 151, 65, 47, 2 },
285 [QCOM_RPM_PM8921_LDO11] = { 153, 67, 48, 2 },
286 [QCOM_RPM_PM8921_LDO12] = { 155, 69, 49, 2 },
287 [QCOM_RPM_PM8921_LDO13] = { 157, 71, 50, 2 },
288 [QCOM_RPM_PM8921_LDO14] = { 159, 73, 51, 2 },
289 [QCOM_RPM_PM8921_LDO15] = { 161, 75, 52, 2 },
290 [QCOM_RPM_PM8921_LDO16] = { 163, 77, 53, 2 },
291 [QCOM_RPM_PM8921_LDO17] = { 165, 79, 54, 2 },
292 [QCOM_RPM_PM8921_LDO18] = { 167, 81, 55, 2 },
293 [QCOM_RPM_PM8921_LDO19] = { 169, 83, 56, 2 },
294 [QCOM_RPM_PM8921_LDO20] = { 171, 85, 57, 2 },
295 [QCOM_RPM_PM8921_LDO21] = { 173, 87, 58, 2 },
296 [QCOM_RPM_PM8921_LDO22] = { 175, 89, 59, 2 },
297 [QCOM_RPM_PM8921_LDO23] = { 177, 91, 60, 2 },
298 [QCOM_RPM_PM8921_LDO24] = { 179, 93, 61, 2 },
299 [QCOM_RPM_PM8921_LDO25] = { 181, 95, 62, 2 },
300 [QCOM_RPM_PM8921_LDO26] = { 183, 97, 63, 2 },
301 [QCOM_RPM_PM8921_LDO27] = { 185, 99, 64, 2 },
302 [QCOM_RPM_PM8921_LDO28] = { 187, 101, 65, 2 },
303 [QCOM_RPM_PM8921_LDO29] = { 189, 103, 66, 2 },
304 [QCOM_RPM_PM8921_CLK1] = { 191, 105, 67, 2 },
305 [QCOM_RPM_PM8921_CLK2] = { 193, 107, 68, 2 },
306 [QCOM_RPM_PM8921_LVS1] = { 195, 109, 69, 1 },
307 [QCOM_RPM_PM8921_LVS2] = { 196, 110, 70, 1 },
308 [QCOM_RPM_PM8921_LVS3] = { 197, 111, 71, 1 },
309 [QCOM_RPM_PM8921_LVS4] = { 198, 112, 72, 1 },
310 [QCOM_RPM_PM8921_LVS5] = { 199, 113, 73, 1 },
311 [QCOM_RPM_PM8921_LVS6] = { 200, 114, 74, 1 },
312 [QCOM_RPM_PM8921_LVS7] = { 201, 115, 75, 1 },
313 [QCOM_RPM_PM8921_NCP] = { 202, 116, 80, 2 },
314 [QCOM_RPM_CXO_BUFFERS] = { 204, 118, 81, 1 },
315 [QCOM_RPM_USB_OTG_SWITCH] = { 205, 119, 82, 1 },
316 [QCOM_RPM_HDMI_SWITCH] = { 206, 120, 83, 1 },
317 [QCOM_RPM_DDR_DMM] = { 207, 121, 84, 2 },
318};
319
320static const struct qcom_rpm_data msm8960_template = {
321 .version = 3,
322 .resource_table = msm8960_rpm_resource_table,
323 .n_resources = ARRAY_SIZE(msm8960_rpm_resource_table),
324};
325
326static const struct of_device_id qcom_rpm_of_match[] = {
327 { .compatible = "qcom,rpm-apq8064", .data = &apq8064_template },
328 { .compatible = "qcom,rpm-msm8660", .data = &msm8660_template },
329 { .compatible = "qcom,rpm-msm8960", .data = &msm8960_template },
330 { }
331};
332MODULE_DEVICE_TABLE(of, qcom_rpm_of_match);
333
334int qcom_rpm_write(struct qcom_rpm *rpm,
335 int state,
336 int resource,
337 u32 *buf, size_t count)
338{
339 const struct qcom_rpm_resource *res;
340 const struct qcom_rpm_data *data = rpm->data;
341 u32 sel_mask[RPM_SELECT_SIZE] = { 0 };
342 int left;
343 int ret = 0;
344 int i;
345
346 if (WARN_ON(resource < 0 || resource >= data->n_resources))
347 return -EINVAL;
348
349 res = &data->resource_table[resource];
350 if (WARN_ON(res->size != count))
351 return -EINVAL;
352
353 mutex_lock(&rpm->lock);
354
355 for (i = 0; i < res->size; i++)
356 writel_relaxed(buf[i], RPM_REQ_REG(rpm, res->target_id + i));
357
358 bitmap_set((unsigned long *)sel_mask, res->select_id, 1);
359 for (i = 0; i < ARRAY_SIZE(sel_mask); i++) {
360 writel_relaxed(sel_mask[i],
361 RPM_CTRL_REG(rpm, RPM_REQ_SELECT + i));
362 }
363
364 writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, RPM_REQUEST_CONTEXT));
365
366 reinit_completion(&rpm->ack);
367 regmap_write(rpm->ipc_regmap, rpm->ipc_offset, BIT(rpm->ipc_bit));
368
369 left = wait_for_completion_timeout(&rpm->ack, RPM_REQUEST_TIMEOUT);
370 if (!left)
371 ret = -ETIMEDOUT;
372 else if (rpm->ack_status & RPM_REJECTED)
373 ret = -EIO;
374
375 mutex_unlock(&rpm->lock);
376
377 return ret;
378}
379EXPORT_SYMBOL(qcom_rpm_write);
380
381static irqreturn_t qcom_rpm_ack_interrupt(int irq, void *dev)
382{
383 struct qcom_rpm *rpm = dev;
384 u32 ack;
385 int i;
386
387 ack = readl_relaxed(RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
388 for (i = 0; i < RPM_SELECT_SIZE; i++)
389 writel_relaxed(0, RPM_CTRL_REG(rpm, RPM_ACK_SELECTOR + i));
390 writel(0, RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
391
392 if (ack & RPM_NOTIFICATION) {
393 dev_warn(rpm->dev, "ignoring notification!\n");
394 } else {
395 rpm->ack_status = ack;
396 complete(&rpm->ack);
397 }
398
399 return IRQ_HANDLED;
400}
401
402static irqreturn_t qcom_rpm_err_interrupt(int irq, void *dev)
403{
404 struct qcom_rpm *rpm = dev;
405
406 regmap_write(rpm->ipc_regmap, rpm->ipc_offset, BIT(rpm->ipc_bit));
407 dev_err(rpm->dev, "RPM triggered fatal error\n");
408
409 return IRQ_HANDLED;
410}
411
412static irqreturn_t qcom_rpm_wakeup_interrupt(int irq, void *dev)
413{
414 return IRQ_HANDLED;
415}
416
417static int qcom_rpm_probe(struct platform_device *pdev)
418{
419 const struct of_device_id *match;
420 struct device_node *syscon_np;
421 struct resource *res;
422 struct qcom_rpm *rpm;
423 u32 fw_version[3];
424 int irq_wakeup;
425 int irq_ack;
426 int irq_err;
427 int ret;
428
429 rpm = devm_kzalloc(&pdev->dev, sizeof(*rpm), GFP_KERNEL);
430 if (!rpm)
431 return -ENOMEM;
432
433 rpm->dev = &pdev->dev;
434 mutex_init(&rpm->lock);
435 init_completion(&rpm->ack);
436
437 irq_ack = platform_get_irq_byname(pdev, "ack");
438 if (irq_ack < 0) {
439 dev_err(&pdev->dev, "required ack interrupt missing\n");
440 return irq_ack;
441 }
442
443 irq_err = platform_get_irq_byname(pdev, "err");
444 if (irq_err < 0) {
445 dev_err(&pdev->dev, "required err interrupt missing\n");
446 return irq_err;
447 }
448
449 irq_wakeup = platform_get_irq_byname(pdev, "wakeup");
450 if (irq_wakeup < 0) {
451 dev_err(&pdev->dev, "required wakeup interrupt missing\n");
452 return irq_wakeup;
453 }
454
455 match = of_match_device(qcom_rpm_of_match, &pdev->dev);
456 rpm->data = match->data;
457
458 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
459 rpm->status_regs = devm_ioremap_resource(&pdev->dev, res);
460 if (IS_ERR(rpm->status_regs))
461 return PTR_ERR(rpm->status_regs);
462 rpm->ctrl_regs = rpm->status_regs + 0x400;
463 rpm->req_regs = rpm->status_regs + 0x600;
464
465 syscon_np = of_parse_phandle(pdev->dev.of_node, "qcom,ipc", 0);
466 if (!syscon_np) {
467 dev_err(&pdev->dev, "no qcom,ipc node\n");
468 return -ENODEV;
469 }
470
471 rpm->ipc_regmap = syscon_node_to_regmap(syscon_np);
472 if (IS_ERR(rpm->ipc_regmap))
473 return PTR_ERR(rpm->ipc_regmap);
474
475 ret = of_property_read_u32_index(pdev->dev.of_node, "qcom,ipc", 1,
476 &rpm->ipc_offset);
477 if (ret < 0) {
478 dev_err(&pdev->dev, "no offset in qcom,ipc\n");
479 return -EINVAL;
480 }
481
482 ret = of_property_read_u32_index(pdev->dev.of_node, "qcom,ipc", 2,
483 &rpm->ipc_bit);
484 if (ret < 0) {
485 dev_err(&pdev->dev, "no bit in qcom,ipc\n");
486 return -EINVAL;
487 }
488
489 dev_set_drvdata(&pdev->dev, rpm);
490
491 fw_version[0] = readl(RPM_STATUS_REG(rpm, 0));
492 fw_version[1] = readl(RPM_STATUS_REG(rpm, 1));
493 fw_version[2] = readl(RPM_STATUS_REG(rpm, 2));
494 if (fw_version[0] != rpm->data->version) {
495 dev_err(&pdev->dev,
496 "RPM version %u.%u.%u incompatible with driver version %u",
497 fw_version[0],
498 fw_version[1],
499 fw_version[2],
500 rpm->data->version);
501 return -EFAULT;
502 }
503
504 dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0],
505 fw_version[1],
506 fw_version[2]);
507
508 ret = devm_request_irq(&pdev->dev,
509 irq_ack,
510 qcom_rpm_ack_interrupt,
511 IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND,
512 "qcom_rpm_ack",
513 rpm);
514 if (ret) {
515 dev_err(&pdev->dev, "failed to request ack interrupt\n");
516 return ret;
517 }
518
519 ret = irq_set_irq_wake(irq_ack, 1);
520 if (ret)
521 dev_warn(&pdev->dev, "failed to mark ack irq as wakeup\n");
522
523 ret = devm_request_irq(&pdev->dev,
524 irq_err,
525 qcom_rpm_err_interrupt,
526 IRQF_TRIGGER_RISING,
527 "qcom_rpm_err",
528 rpm);
529 if (ret) {
530 dev_err(&pdev->dev, "failed to request err interrupt\n");
531 return ret;
532 }
533
534 ret = devm_request_irq(&pdev->dev,
535 irq_wakeup,
536 qcom_rpm_wakeup_interrupt,
537 IRQF_TRIGGER_RISING,
538 "qcom_rpm_wakeup",
539 rpm);
540 if (ret) {
541 dev_err(&pdev->dev, "failed to request wakeup interrupt\n");
542 return ret;
543 }
544
545 ret = irq_set_irq_wake(irq_wakeup, 1);
546 if (ret)
547 dev_warn(&pdev->dev, "failed to mark wakeup irq as wakeup\n");
548
549 return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
550}
551
552static int qcom_rpm_remove(struct platform_device *pdev)
553{
554 of_platform_depopulate(&pdev->dev);
555 return 0;
556}
557
558static struct platform_driver qcom_rpm_driver = {
559 .probe = qcom_rpm_probe,
560 .remove = qcom_rpm_remove,
561 .driver = {
562 .name = "qcom_rpm",
563 .of_match_table = qcom_rpm_of_match,
564 },
565};
566
567static int __init qcom_rpm_init(void)
568{
569 return platform_driver_register(&qcom_rpm_driver);
570}
571arch_initcall(qcom_rpm_init);
572
573static void __exit qcom_rpm_exit(void)
574{
575 platform_driver_unregister(&qcom_rpm_driver);
576}
577module_exit(qcom_rpm_exit)
578
579MODULE_DESCRIPTION("Qualcomm Resource Power Manager driver");
580MODULE_LICENSE("GPL v2");
581MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
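
qcom_rpm_write() above is the sole consumer-facing entry point: callers name a resource by its dt-bindings index, supply exactly res->size words, and block for the RPM's ack (or -ETIMEDOUT/-EIO). A hedged usage sketch; the state value and the on/off payload are illustrative, not taken from this pull:

	#include <linux/mfd/qcom_rpm.h>
	#include <dt-bindings/mfd/qcom-rpm.h>

	static int example_request_cxo_buffers(struct qcom_rpm *rpm)
	{
		u32 value = 1;	/* illustrative request payload */

		/* QCOM_RPM_CXO_BUFFERS has size 1 in every table above, so
		 * exactly one word is passed; state 0 is assumed here to
		 * select the active context. */
		return qcom_rpm_write(rpm, 0, QCOM_RPM_CXO_BUFFERS, &value, 1);
	}
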
diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c
index 663f8a37aa6b..2d64430c719b 100644
--- a/drivers/mfd/retu-mfd.c
+++ b/drivers/mfd/retu-mfd.c
@@ -222,7 +222,7 @@ static struct regmap_bus retu_bus = {
222 .val_format_endian_default = REGMAP_ENDIAN_NATIVE, 222 .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
223}; 223};
224 224
225static struct regmap_config retu_config = { 225static const struct regmap_config retu_config = {
226 .reg_bits = 8, 226 .reg_bits = 8,
227 .val_bits = 16, 227 .val_bits = 16,
228}; 228};
diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c
new file mode 100644
index 000000000000..db395a6c52bc
--- /dev/null
+++ b/drivers/mfd/rt5033.c
@@ -0,0 +1,142 @@
1/*
2 * MFD core driver for the Richtek RT5033.
3 *
4 * RT5033 comprises multiple sub-devices: switching charger, fuel gauge,
5 * flash LED, current source, LDO and BUCK regulators.
6 *
7 * Copyright (C) 2014 Samsung Electronics, Co., Ltd.
8 * Author: Beomho Seo <beomho.seo@samsung.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/err.h>
16#include <linux/module.h>
17#include <linux/interrupt.h>
18#include <linux/of_device.h>
19#include <linux/mfd/core.h>
20#include <linux/mfd/rt5033.h>
21#include <linux/mfd/rt5033-private.h>
22
23static const struct regmap_irq rt5033_irqs[] = {
24 { .mask = RT5033_PMIC_IRQ_BUCKOCP, },
25 { .mask = RT5033_PMIC_IRQ_BUCKLV, },
26 { .mask = RT5033_PMIC_IRQ_SAFELDOLV, },
27 { .mask = RT5033_PMIC_IRQ_LDOLV, },
28 { .mask = RT5033_PMIC_IRQ_OT, },
29 { .mask = RT5033_PMIC_IRQ_VDDA_UV, },
30};
31
32static const struct regmap_irq_chip rt5033_irq_chip = {
33 .name = "rt5033",
34 .status_base = RT5033_REG_PMIC_IRQ_STAT,
35 .mask_base = RT5033_REG_PMIC_IRQ_CTRL,
36 .mask_invert = true,
37 .num_regs = 1,
38 .irqs = rt5033_irqs,
39 .num_irqs = ARRAY_SIZE(rt5033_irqs),
40};
41
42static const struct mfd_cell rt5033_devs[] = {
43 { .name = "rt5033-regulator", },
44 {
45 .name = "rt5033-charger",
46 .of_compatible = "richtek,rt5033-charger",
47 }, {
48 .name = "rt5033-battery",
49 .of_compatible = "richtek,rt5033-battery",
50 },
51};
52
53static const struct regmap_config rt5033_regmap_config = {
54 .reg_bits = 8,
55 .val_bits = 8,
56 .max_register = RT5033_REG_END,
57};
58
59static int rt5033_i2c_probe(struct i2c_client *i2c,
60 const struct i2c_device_id *id)
61{
62 struct rt5033_dev *rt5033;
63 unsigned int dev_id;
64 int ret;
65
66 rt5033 = devm_kzalloc(&i2c->dev, sizeof(*rt5033), GFP_KERNEL);
67 if (!rt5033)
68 return -ENOMEM;
69
70 i2c_set_clientdata(i2c, rt5033);
71 rt5033->dev = &i2c->dev;
72 rt5033->irq = i2c->irq;
73 rt5033->wakeup = true;
74
75 rt5033->regmap = devm_regmap_init_i2c(i2c, &rt5033_regmap_config);
76 if (IS_ERR(rt5033->regmap)) {
77 dev_err(&i2c->dev, "Failed to allocate register map.\n");
78 return PTR_ERR(rt5033->regmap);
79 }
80
81 ret = regmap_read(rt5033->regmap, RT5033_REG_DEVICE_ID, &dev_id);
82 if (ret) {
83 dev_err(&i2c->dev, "Device not found\n");
84 return -ENODEV;
85 }
86 dev_info(&i2c->dev, "Device found (Device ID: %04x)\n", dev_id);
87
88 ret = regmap_add_irq_chip(rt5033->regmap, rt5033->irq,
89 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
90 0, &rt5033_irq_chip, &rt5033->irq_data);
91 if (ret) {
92 dev_err(&i2c->dev, "Failed to request IRQ %d: %d\n",
93 rt5033->irq, ret);
94 return ret;
95 }
96
97 ret = mfd_add_devices(rt5033->dev, -1, rt5033_devs,
98 ARRAY_SIZE(rt5033_devs), NULL, 0,
99 regmap_irq_get_domain(rt5033->irq_data));
100 if (ret < 0) {
101 dev_err(&i2c->dev, "Failed to add RT5033 child devices.\n");
102 return ret;
103 }
104
105 device_init_wakeup(rt5033->dev, rt5033->wakeup);
106
107 return 0;
108}
109
110static int rt5033_i2c_remove(struct i2c_client *i2c)
111{
112 mfd_remove_devices(&i2c->dev);
113
114 return 0;
115}
116
117static const struct i2c_device_id rt5033_i2c_id[] = {
118 { "rt5033", },
119 { }
120};
121MODULE_DEVICE_TABLE(i2c, rt5033_i2c_id);
122
123static const struct of_device_id rt5033_dt_match[] = {
124 { .compatible = "richtek,rt5033", },
125 { }
126};
127
128static struct i2c_driver rt5033_driver = {
129 .driver = {
130 .name = "rt5033",
131 .of_match_table = of_match_ptr(rt5033_dt_match),
132 },
133 .probe = rt5033_i2c_probe,
134 .remove = rt5033_i2c_remove,
135 .id_table = rt5033_i2c_id,
136};
137module_i2c_driver(rt5033_driver);
138
139MODULE_ALIAS("i2c:rt5033");
140MODULE_DESCRIPTION("Richtek RT5033 multi-function core driver");
141MODULE_AUTHOR("Beomho Seo <beomho.seo@samsung.com>");
142MODULE_LICENSE("GPL");
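
The three cells registered above share the parent's regmap and IRQ domain; a sub-driver typically reaches them through the parent device's drvdata, which rt5033_i2c_probe() set via i2c_set_clientdata(). A minimal sketch of the consumer side, with a hypothetical child probe:

	#include <linux/mfd/rt5033.h>
	#include <linux/mfd/rt5033-private.h>
	#include <linux/platform_device.h>
	#include <linux/regmap.h>

	static int rt5033_child_probe(struct platform_device *pdev)
	{
		/* The MFD core parents each cell to the rt5033 i2c client. */
		struct rt5033_dev *rt5033 = dev_get_drvdata(pdev->dev.parent);
		unsigned int id;

		/* All register access goes through the one shared regmap. */
		return regmap_read(rt5033->regmap, RT5033_REG_DEVICE_ID, &id);
	}
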
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
index 210d1f85679e..ede50244f265 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/mfd/rtsx_usb.c
@@ -681,9 +681,27 @@ static void rtsx_usb_disconnect(struct usb_interface *intf)
681#ifdef CONFIG_PM 681#ifdef CONFIG_PM
682static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message) 682static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message)
683{ 683{
684 struct rtsx_ucr *ucr =
685 (struct rtsx_ucr *)usb_get_intfdata(intf);
686 u16 val = 0;
687
684 dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n", 688 dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n",
685 __func__, message.event); 689 __func__, message.event);
686 690
691 if (PMSG_IS_AUTO(message)) {
692 if (mutex_trylock(&ucr->dev_mutex)) {
693 rtsx_usb_get_card_status(ucr, &val);
694 mutex_unlock(&ucr->dev_mutex);
695
696 /* Defer the autosuspend if card exists */
697 if (val & (SD_CD | MS_CD))
698 return -EAGAIN;
699 } else {
700 /* There is an ongoing operation */
701 return -EAGAIN;
702 }
703 }
704
687 return 0; 705 return 0;
688} 706}
689 707
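
The -EAGAIN returns above fire only for PMSG_IS_AUTO() messages, so runtime autosuspend is deferred while a card is seated or an operation holds dev_mutex, while system sleep always proceeds; mutex_trylock() keeps the PM worker from blocking behind a long card operation. The pattern in isolation, with hypothetical names:

	#include <linux/mutex.h>
	#include <linux/usb.h>

	struct example_dev {
		struct mutex lock;
	};

	static bool example_card_present(struct example_dev *dev)
	{
		return false;	/* stand-in; a real driver polls the slot */
	}

	static int example_suspend(struct usb_interface *intf, pm_message_t msg)
	{
		struct example_dev *dev = usb_get_intfdata(intf);
		bool busy;

		if (PMSG_IS_AUTO(msg)) {
			if (!mutex_trylock(&dev->lock))
				return -EAGAIN;	/* operation in flight */

			busy = example_card_present(dev);
			mutex_unlock(&dev->lock);

			if (busy)
				return -EAGAIN;	/* defer while card present */
		}

		return 0;	/* system suspend always proceeds */
	}
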
diff --git a/drivers/mfd/smsc-ece1099.c b/drivers/mfd/smsc-ece1099.c
index 90112d4cc905..03246880d484 100644
--- a/drivers/mfd/smsc-ece1099.c
+++ b/drivers/mfd/smsc-ece1099.c
@@ -24,7 +24,7 @@
24#include <linux/mfd/smsc.h> 24#include <linux/mfd/smsc.h>
25#include <linux/of_platform.h> 25#include <linux/of_platform.h>
26 26
27static struct regmap_config smsc_regmap_config = { 27static const struct regmap_config smsc_regmap_config = {
28 .reg_bits = 8, 28 .reg_bits = 8,
29 .val_bits = 8, 29 .val_bits = 8,
30 .max_register = SMSC_VEN_ID_H, 30 .max_register = SMSC_VEN_ID_H,
diff --git a/drivers/mfd/sun6i-prcm.c b/drivers/mfd/sun6i-prcm.c
index 2f2e9f062571..191173166d65 100644
--- a/drivers/mfd/sun6i-prcm.c
+++ b/drivers/mfd/sun6i-prcm.c
@@ -41,6 +41,14 @@ static const struct resource sun6i_a31_apb0_gates_clk_res[] = {
41 }, 41 },
42}; 42};
43 43
44static const struct resource sun6i_a31_ir_clk_res[] = {
45 {
46 .start = 0x54,
47 .end = 0x57,
48 .flags = IORESOURCE_MEM,
49 },
50};
51
44static const struct resource sun6i_a31_apb0_rstc_res[] = { 52static const struct resource sun6i_a31_apb0_rstc_res[] = {
45 { 53 {
46 .start = 0xb0, 54 .start = 0xb0,
@@ -69,6 +77,12 @@ static const struct mfd_cell sun6i_a31_prcm_subdevs[] = {
69 .resources = sun6i_a31_apb0_gates_clk_res, 77 .resources = sun6i_a31_apb0_gates_clk_res,
70 }, 78 },
71 { 79 {
80 .name = "sun6i-a31-ir-clk",
81 .of_compatible = "allwinner,sun4i-a10-mod0-clk",
82 .num_resources = ARRAY_SIZE(sun6i_a31_ir_clk_res),
83 .resources = sun6i_a31_ir_clk_res,
84 },
85 {
72 .name = "sun6i-a31-apb0-clock-reset", 86 .name = "sun6i-a31-apb0-clock-reset",
73 .of_compatible = "allwinner,sun6i-a31-clock-reset", 87 .of_compatible = "allwinner,sun6i-a31-clock-reset",
74 .num_resources = ARRAY_SIZE(sun6i_a31_apb0_rstc_res), 88 .num_resources = ARRAY_SIZE(sun6i_a31_apb0_rstc_res),
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 80a919a8ca97..7d1cfc1d3ce0 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -145,7 +145,7 @@ int tps65217_clear_bits(struct tps65217 *tps, unsigned int reg,
145} 145}
146EXPORT_SYMBOL_GPL(tps65217_clear_bits); 146EXPORT_SYMBOL_GPL(tps65217_clear_bits);
147 147
148static struct regmap_config tps65217_regmap_config = { 148static const struct regmap_config tps65217_regmap_config = {
149 .reg_bits = 8, 149 .reg_bits = 8,
150 .val_bits = 8, 150 .val_bits = 8,
151 151
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index d6b764349f9d..7af11a8b9753 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -135,7 +135,7 @@ static const struct regmap_access_table tps65218_volatile_table = {
135 .n_yes_ranges = ARRAY_SIZE(tps65218_yes_ranges), 135 .n_yes_ranges = ARRAY_SIZE(tps65218_yes_ranges),
136}; 136};
137 137
138static struct regmap_config tps65218_regmap_config = { 138static const struct regmap_config tps65218_regmap_config = {
139 .reg_bits = 8, 139 .reg_bits = 8,
140 .val_bits = 8, 140 .val_bits = 8,
141 .cache_type = REGCACHE_RBTREE, 141 .cache_type = REGCACHE_RBTREE,
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index db11b4f40611..489674a2497e 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -207,7 +207,7 @@ static struct twl_mapping twl4030_map[] = {
207 { 2, TWL5031_BASEADD_INTERRUPTS }, 207 { 2, TWL5031_BASEADD_INTERRUPTS },
208}; 208};
209 209
210static struct reg_default twl4030_49_defaults[] = { 210static const struct reg_default twl4030_49_defaults[] = {
211 /* Audio Registers */ 211 /* Audio Registers */
212 { 0x01, 0x00}, /* CODEC_MODE */ 212 { 0x01, 0x00}, /* CODEC_MODE */
213 { 0x02, 0x00}, /* OPTION */ 213 { 0x02, 0x00}, /* OPTION */
@@ -306,7 +306,7 @@ static const struct regmap_access_table twl4030_49_volatile_table = {
306 .n_yes_ranges = ARRAY_SIZE(twl4030_49_volatile_ranges), 306 .n_yes_ranges = ARRAY_SIZE(twl4030_49_volatile_ranges),
307}; 307};
308 308
309static struct regmap_config twl4030_regmap_config[4] = { 309static const struct regmap_config twl4030_regmap_config[4] = {
310 { 310 {
311 /* Address 0x48 */ 311 /* Address 0x48 */
312 .reg_bits = 8, 312 .reg_bits = 8,
@@ -369,7 +369,7 @@ static struct twl_mapping twl6030_map[] = {
369 { 1, TWL6030_BASEADD_GASGAUGE }, 369 { 1, TWL6030_BASEADD_GASGAUGE },
370}; 370};
371 371
372static struct regmap_config twl6030_regmap_config[3] = { 372static const struct regmap_config twl6030_regmap_config[3] = {
373 { 373 {
374 /* Address 0x48 */ 374 /* Address 0x48 */
375 .reg_bits = 8, 375 .reg_bits = 8,
@@ -1087,7 +1087,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
1087 struct twl4030_platform_data *pdata = dev_get_platdata(&client->dev); 1087 struct twl4030_platform_data *pdata = dev_get_platdata(&client->dev);
1088 struct device_node *node = client->dev.of_node; 1088 struct device_node *node = client->dev.of_node;
1089 struct platform_device *pdev; 1089 struct platform_device *pdev;
1090 struct regmap_config *twl_regmap_config; 1090 const struct regmap_config *twl_regmap_config;
1091 int irq_base = 0; 1091 int irq_base = 0;
1092 int status; 1092 int status;
1093 unsigned i, num_slaves; 1093 unsigned i, num_slaves;
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index 9687645162ae..f71ee3dbc2a2 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -44,7 +44,7 @@
44#define VIBRACTRL_MEMBER(reg) ((reg == TWL6040_REG_VIBCTLL) ? 0 : 1) 44#define VIBRACTRL_MEMBER(reg) ((reg == TWL6040_REG_VIBCTLL) ? 0 : 1)
45#define TWL6040_NUM_SUPPLIES (2) 45#define TWL6040_NUM_SUPPLIES (2)
46 46
47static struct reg_default twl6040_defaults[] = { 47static const struct reg_default twl6040_defaults[] = {
48 { 0x01, 0x4B }, /* REG_ASICID (ro) */ 48 { 0x01, 0x4B }, /* REG_ASICID (ro) */
49 { 0x02, 0x00 }, /* REG_ASICREV (ro) */ 49 { 0x02, 0x00 }, /* REG_ASICREV (ro) */
50 { 0x03, 0x00 }, /* REG_INTID */ 50 { 0x03, 0x00 }, /* REG_INTID */
@@ -580,7 +580,7 @@ static bool twl6040_writeable_reg(struct device *dev, unsigned int reg)
580 } 580 }
581} 581}
582 582
583static struct regmap_config twl6040_regmap_config = { 583static const struct regmap_config twl6040_regmap_config = {
584 .reg_bits = 8, 584 .reg_bits = 8,
585 .val_bits = 8, 585 .val_bits = 8,
586 586
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 6ca9d25cc3f0..53ae5af5d6e4 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -36,12 +36,12 @@
36static const struct mfd_cell wm8994_regulator_devs[] = { 36static const struct mfd_cell wm8994_regulator_devs[] = {
37 { 37 {
38 .name = "wm8994-ldo", 38 .name = "wm8994-ldo",
39 .id = 1, 39 .id = 0,
40 .pm_runtime_no_callbacks = true, 40 .pm_runtime_no_callbacks = true,
41 }, 41 },
42 { 42 {
43 .name = "wm8994-ldo", 43 .name = "wm8994-ldo",
44 .id = 2, 44 .id = 1,
45 .pm_runtime_no_callbacks = true, 45 .pm_runtime_no_callbacks = true,
46 }, 46 },
47}; 47};
@@ -344,7 +344,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
344 dev_set_drvdata(wm8994->dev, wm8994); 344 dev_set_drvdata(wm8994->dev, wm8994);
345 345
346 /* Add the on-chip regulators first for bootstrapping */ 346 /* Add the on-chip regulators first for bootstrapping */
347 ret = mfd_add_devices(wm8994->dev, -1, 347 ret = mfd_add_devices(wm8994->dev, 0,
348 wm8994_regulator_devs, 348 wm8994_regulator_devs,
349 ARRAY_SIZE(wm8994_regulator_devs), 349 ARRAY_SIZE(wm8994_regulator_devs),
350 NULL, 0, NULL); 350 NULL, 0, NULL);
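
For reference, mfd_add_devices() composes each child's platform device id as base + cell->id, so the wm8994 change above is name-neutral; only the arithmetic becomes non-negative. Sketched out:

	/*
	 * mfd_add_device() does, in effect:
	 *	pdev = platform_device_alloc(cell->name, id + cell->id);
	 *
	 *	old: base -1, cell ids {1, 2} -> wm8994-ldo.0, wm8994-ldo.1
	 *	new: base  0, cell ids {0, 1} -> wm8994-ldo.0, wm8994-ldo.1
	 */
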
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 6af0a28ba37d..e8a4218b5726 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -21,8 +21,6 @@
21#include <linux/err.h> 21#include <linux/err.h>
22 22
23#include <linux/clk.h> 23#include <linux/clk.h>
24#include <linux/clk/sunxi.h>
25
26#include <linux/gpio.h> 24#include <linux/gpio.h>
27#include <linux/platform_device.h> 25#include <linux/platform_device.h>
28#include <linux/spinlock.h> 26#include <linux/spinlock.h>
@@ -229,6 +227,8 @@ struct sunxi_mmc_host {
229 /* clock management */ 227 /* clock management */
230 struct clk *clk_ahb; 228 struct clk *clk_ahb;
231 struct clk *clk_mmc; 229 struct clk *clk_mmc;
230 struct clk *clk_sample;
231 struct clk *clk_output;
232 232
233 /* irq */ 233 /* irq */
234 spinlock_t lock; 234 spinlock_t lock;
@@ -653,26 +653,31 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
653 653
654 /* determine delays */ 654 /* determine delays */
655 if (rate <= 400000) { 655 if (rate <= 400000) {
656 oclk_dly = 0; 656 oclk_dly = 180;
657 sclk_dly = 7; 657 sclk_dly = 42;
658 } else if (rate <= 25000000) { 658 } else if (rate <= 25000000) {
659 oclk_dly = 0; 659 oclk_dly = 180;
660 sclk_dly = 5; 660 sclk_dly = 75;
661 } else if (rate <= 50000000) { 661 } else if (rate <= 50000000) {
662 if (ios->timing == MMC_TIMING_UHS_DDR50) { 662 if (ios->timing == MMC_TIMING_UHS_DDR50) {
663 oclk_dly = 2; 663 oclk_dly = 60;
664 sclk_dly = 4; 664 sclk_dly = 120;
665 } else { 665 } else {
666 oclk_dly = 3; 666 oclk_dly = 90;
667 sclk_dly = 5; 667 sclk_dly = 150;
668 } 668 }
669 } else if (rate <= 100000000) {
670 oclk_dly = 6;
671 sclk_dly = 24;
672 } else if (rate <= 200000000) {
673 oclk_dly = 3;
674 sclk_dly = 12;
669 } else { 675 } else {
670 /* rate > 50000000 */ 676 return -EINVAL;
671 oclk_dly = 2;
672 sclk_dly = 4;
673 } 677 }
674 678
675 clk_sunxi_mmc_phase_control(host->clk_mmc, sclk_dly, oclk_dly); 679 clk_set_phase(host->clk_sample, sclk_dly);
680 clk_set_phase(host->clk_output, oclk_dly);
676 681
677 return sunxi_mmc_oclk_onoff(host, 1); 682 return sunxi_mmc_oclk_onoff(host, 1);
678} 683}
@@ -913,6 +918,18 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
913 return PTR_ERR(host->clk_mmc); 918 return PTR_ERR(host->clk_mmc);
914 } 919 }
915 920
921 host->clk_output = devm_clk_get(&pdev->dev, "output");
922 if (IS_ERR(host->clk_output)) {
923 dev_err(&pdev->dev, "Could not get output clock\n");
924 return PTR_ERR(host->clk_output);
925 }
926
927 host->clk_sample = devm_clk_get(&pdev->dev, "sample");
928 if (IS_ERR(host->clk_sample)) {
929 dev_err(&pdev->dev, "Could not get sample clock\n");
930 return PTR_ERR(host->clk_sample);
931 }
932
916 host->reset = devm_reset_control_get(&pdev->dev, "ahb"); 933 host->reset = devm_reset_control_get(&pdev->dev, "ahb");
917 934
918 ret = clk_prepare_enable(host->clk_ahb); 935 ret = clk_prepare_enable(host->clk_ahb);
@@ -927,11 +944,23 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
927 goto error_disable_clk_ahb; 944 goto error_disable_clk_ahb;
928 } 945 }
929 946
947 ret = clk_prepare_enable(host->clk_output);
948 if (ret) {
949 dev_err(&pdev->dev, "Enable output clk err %d\n", ret);
950 goto error_disable_clk_mmc;
951 }
952
953 ret = clk_prepare_enable(host->clk_sample);
954 if (ret) {
955 dev_err(&pdev->dev, "Enable sample clk err %d\n", ret);
956 goto error_disable_clk_output;
957 }
958
930 if (!IS_ERR(host->reset)) { 959 if (!IS_ERR(host->reset)) {
931 ret = reset_control_deassert(host->reset); 960 ret = reset_control_deassert(host->reset);
932 if (ret) { 961 if (ret) {
933 dev_err(&pdev->dev, "reset err %d\n", ret); 962 dev_err(&pdev->dev, "reset err %d\n", ret);
934 goto error_disable_clk_mmc; 963 goto error_disable_clk_sample;
935 } 964 }
936 } 965 }
937 966
@@ -950,6 +979,10 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
950error_assert_reset: 979error_assert_reset:
951 if (!IS_ERR(host->reset)) 980 if (!IS_ERR(host->reset))
952 reset_control_assert(host->reset); 981 reset_control_assert(host->reset);
982error_disable_clk_sample:
983 clk_disable_unprepare(host->clk_sample);
984error_disable_clk_output:
985 clk_disable_unprepare(host->clk_output);
953error_disable_clk_mmc: 986error_disable_clk_mmc:
954 clk_disable_unprepare(host->clk_mmc); 987 clk_disable_unprepare(host->clk_mmc);
955error_disable_clk_ahb: 988error_disable_clk_ahb:
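
The sunxi-mmc change retires the platform-private clk_sunxi_mmc_phase_control() in favour of the generic clk_set_phase() API on two newly-named clocks, "sample" and "output", with delays expressed in degrees rather than raw tap values. The new call sequence reduced to its essentials, assuming the clocks were obtained in probe:

	#include <linux/clk.h>

	/* Degree values are illustrative; the driver derives them per rate. */
	static int example_apply_mmc_phases(struct clk *sample, struct clk *output)
	{
		int ret;

		ret = clk_set_phase(sample, 120);	/* data sampling point */
		if (ret)
			return ret;

		return clk_set_phase(output, 60);	/* clock launch point */
	}
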
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index cc13ea5ce4d5..c0720c1ee4c9 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -15,6 +15,8 @@
15#include <linux/mtd/mtd.h> 15#include <linux/mtd/mtd.h>
16#include <linux/mtd/partitions.h> 16#include <linux/mtd/partitions.h>
17 17
18#include <uapi/linux/magic.h>
19
18/* 20/*
19 * NAND flash on Netgear R6250 was verified to contain 15 partitions. 21 * NAND flash on Netgear R6250 was verified to contain 15 partitions.
20 * This will result in allocating too big array for some old devices, but the 22 * This will result in allocating too big array for some old devices, but the
@@ -39,7 +41,8 @@
39#define ML_MAGIC1 0x39685a42 41#define ML_MAGIC1 0x39685a42
40#define ML_MAGIC2 0x26594131 42#define ML_MAGIC2 0x26594131
41#define TRX_MAGIC 0x30524448 43#define TRX_MAGIC 0x30524448
42#define SQSH_MAGIC 0x71736873 /* shsq */ 44#define SHSQ_MAGIC 0x71736873 /* shsq (weird ZTE H218N endianness) */
45#define UBI_EC_MAGIC 0x23494255 /* UBI# */
43 46
44struct trx_header { 47struct trx_header {
45 uint32_t magic; 48 uint32_t magic;
@@ -50,7 +53,7 @@ struct trx_header {
50 uint32_t offset[3]; 53 uint32_t offset[3];
51} __packed; 54} __packed;
52 55
53static void bcm47xxpart_add_part(struct mtd_partition *part, char *name, 56static void bcm47xxpart_add_part(struct mtd_partition *part, const char *name,
54 u64 offset, uint32_t mask_flags) 57 u64 offset, uint32_t mask_flags)
55{ 58{
56 part->name = name; 59 part->name = name;
@@ -58,6 +61,26 @@ static void bcm47xxpart_add_part(struct mtd_partition *part, char *name,
58 part->mask_flags = mask_flags; 61 part->mask_flags = mask_flags;
59} 62}
60 63
64static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master,
65 size_t offset)
66{
67 uint32_t buf;
68 size_t bytes_read;
69
70 if (mtd_read(master, offset, sizeof(buf), &bytes_read,
71 (uint8_t *)&buf) < 0) {
72 pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
73 offset);
74 goto out_default;
75 }
76
77 if (buf == UBI_EC_MAGIC)
78 return "ubi";
79
80out_default:
81 return "rootfs";
82}
83
61static int bcm47xxpart_parse(struct mtd_info *master, 84static int bcm47xxpart_parse(struct mtd_info *master,
62 struct mtd_partition **pparts, 85 struct mtd_partition **pparts,
63 struct mtd_part_parser_data *data) 86 struct mtd_part_parser_data *data)
@@ -73,8 +96,12 @@ static int bcm47xxpart_parse(struct mtd_info *master,
73 int last_trx_part = -1; 96 int last_trx_part = -1;
74 int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, }; 97 int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
75 98
76 if (blocksize <= 0x10000) 99 /*
77 blocksize = 0x10000; 100 * Some really old flashes (like AT45DB*) had smaller erasesize-s, but
101 * partitions were aligned to at least 0x1000 anyway.
102 */
103 if (blocksize < 0x1000)
104 blocksize = 0x1000;
78 105
79 /* Alloc */ 106 /* Alloc */
80 parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS, 107 parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
@@ -186,8 +213,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
186 * we want to have jffs2 (overlay) in the same mtd. 213 * we want to have jffs2 (overlay) in the same mtd.
187 */ 214 */
188 if (trx->offset[i]) { 215 if (trx->offset[i]) {
216 const char *name;
217
218 name = bcm47xxpart_trx_data_part_name(master, offset + trx->offset[i]);
189 bcm47xxpart_add_part(&parts[curr_part++], 219 bcm47xxpart_add_part(&parts[curr_part++],
190 "rootfs", 220 name,
191 offset + trx->offset[i], 221 offset + trx->offset[i],
192 0); 222 0);
193 i++; 223 i++;
@@ -205,7 +235,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
205 } 235 }
206 236
207 /* Squashfs on devices not using TRX */ 237 /* Squashfs on devices not using TRX */
208 if (buf[0x000 / 4] == SQSH_MAGIC) { 238 if (le32_to_cpu(buf[0x000 / 4]) == SQUASHFS_MAGIC ||
239 buf[0x000 / 4] == SHSQ_MAGIC) {
209 bcm47xxpart_add_part(&parts[curr_part++], "rootfs", 240 bcm47xxpart_add_part(&parts[curr_part++], "rootfs",
210 offset, 0); 241 offset, 0);
211 continue; 242 continue;
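
The blocksize clamp above replaces the old round-up-to-64KiB with a 4 KiB floor, since even small-sector flashes (AT45DB*) align their partitions to at least 0x1000. Expressed as a helper, with a hypothetical name:

	#include <linux/kernel.h>

	/* Never scan at a finer granularity than the minimum partition
	 * alignment seen in the wild, regardless of the chip erasesize. */
	static u32 example_scan_blocksize(u32 erasesize)
	{
		return max_t(u32, erasesize, 0x1000);
	}
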
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index 991c2a1c05d3..afb43d5e1782 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -68,6 +68,7 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
68 mtd->_get_unmapped_area = mapram_unmapped_area; 68 mtd->_get_unmapped_area = mapram_unmapped_area;
69 mtd->_read = mapram_read; 69 mtd->_read = mapram_read;
70 mtd->_write = mapram_write; 70 mtd->_write = mapram_write;
71 mtd->_panic_write = mapram_write;
71 mtd->_sync = mapram_nop; 72 mtd->_sync = mapram_nop;
72 mtd->flags = MTD_CAP_RAM; 73 mtd->flags = MTD_CAP_RAM;
73 mtd->writesize = 1; 74 mtd->writesize = 1;
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index 47a43cf7e5c6..e67f73ab44c9 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -11,6 +11,7 @@
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/of.h>
14#include <linux/mtd/mtd.h> 15#include <linux/mtd/mtd.h>
15#include <linux/mtd/map.h> 16#include <linux/mtd/map.h>
16 17
@@ -28,6 +29,15 @@ static struct mtd_chip_driver maprom_chipdrv = {
28 .module = THIS_MODULE 29 .module = THIS_MODULE
29}; 30};
30 31
32static unsigned int default_erasesize(struct map_info *map)
33{
34 const __be32 *erase_size = NULL;
35
36 erase_size = of_get_property(map->device_node, "erase-size", NULL);
37
38 return !erase_size ? map->size : be32_to_cpu(*erase_size);
39}
40
31static struct mtd_info *map_rom_probe(struct map_info *map) 41static struct mtd_info *map_rom_probe(struct map_info *map)
32{ 42{
33 struct mtd_info *mtd; 43 struct mtd_info *mtd;
@@ -47,8 +57,9 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
47 mtd->_sync = maprom_nop; 57 mtd->_sync = maprom_nop;
48 mtd->_erase = maprom_erase; 58 mtd->_erase = maprom_erase;
49 mtd->flags = MTD_CAP_ROM; 59 mtd->flags = MTD_CAP_ROM;
50 mtd->erasesize = map->size; 60 mtd->erasesize = default_erasesize(map);
51 mtd->writesize = 1; 61 mtd->writesize = 1;
62 mtd->writebufsize = 1;
52 63
53 __module_get(THIS_MODULE); 64 __module_get(THIS_MODULE);
54 return mtd; 65 return mtd;
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index 54ffe5223e64..3060025c8af4 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -24,6 +24,7 @@
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/io.h> 25#include <linux/io.h>
26#include <linux/of.h> 26#include <linux/of.h>
27#include <linux/clk.h>
27 28
28#include "serial_flash_cmds.h" 29#include "serial_flash_cmds.h"
29 30
@@ -262,6 +263,7 @@ struct stfsm {
262 struct mtd_info mtd; 263 struct mtd_info mtd;
263 struct mutex lock; 264 struct mutex lock;
264 struct flash_info *info; 265 struct flash_info *info;
266 struct clk *clk;
265 267
266 uint32_t configuration; 268 uint32_t configuration;
267 uint32_t fifo_dir_delay; 269 uint32_t fifo_dir_delay;
@@ -663,6 +665,23 @@ static struct stfsm_seq stfsm_seq_write_status = {
663 SEQ_CFG_STARTSEQ), 665 SEQ_CFG_STARTSEQ),
664}; 666};
665 667
668/* Dummy sequence to read one byte of data from flash into the FIFO */
669static const struct stfsm_seq stfsm_seq_load_fifo_byte = {
670 .data_size = TRANSFER_SIZE(1),
671 .seq_opc[0] = (SEQ_OPC_PADS_1 |
672 SEQ_OPC_CYCLES(8) |
673 SEQ_OPC_OPCODE(SPINOR_OP_RDID)),
674 .seq = {
675 STFSM_INST_CMD1,
676 STFSM_INST_DATA_READ,
677 STFSM_INST_STOP,
678 },
679 .seq_cfg = (SEQ_CFG_PADS_1 |
680 SEQ_CFG_READNOTWRITE |
681 SEQ_CFG_CSDEASSERT |
682 SEQ_CFG_STARTSEQ),
683};
684
666static int stfsm_n25q_en_32bit_addr_seq(struct stfsm_seq *seq) 685static int stfsm_n25q_en_32bit_addr_seq(struct stfsm_seq *seq)
667{ 686{
668 seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | 687 seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
@@ -695,22 +714,6 @@ static inline uint32_t stfsm_fifo_available(struct stfsm *fsm)
695 return (readl(fsm->base + SPI_FAST_SEQ_STA) >> 5) & 0x7f; 714 return (readl(fsm->base + SPI_FAST_SEQ_STA) >> 5) & 0x7f;
696} 715}
697 716
698static void stfsm_clear_fifo(struct stfsm *fsm)
699{
700 uint32_t avail;
701
702 for (;;) {
703 avail = stfsm_fifo_available(fsm);
704 if (!avail)
705 break;
706
707 while (avail) {
708 readl(fsm->base + SPI_FAST_SEQ_DATA_REG);
709 avail--;
710 }
711 }
712}
713
714static inline void stfsm_load_seq(struct stfsm *fsm, 717static inline void stfsm_load_seq(struct stfsm *fsm,
715 const struct stfsm_seq *seq) 718 const struct stfsm_seq *seq)
716{ 719{
@@ -772,6 +775,68 @@ static void stfsm_read_fifo(struct stfsm *fsm, uint32_t *buf, uint32_t size)
772 } 775 }
773} 776}
774 777
778/*
779 * Clear the data FIFO
780 *
781 * Typically, this is only required during driver initialisation, where no
782 * assumptions can be made regarding the state of the FIFO.
783 *
784 * The process of clearing the FIFO is complicated by the fact that while it is
785 * possible for the FIFO to contain an arbitrary number of bytes [1], the
786 * SPI_FAST_SEQ_STA register only reports the number of complete 32-bit words
787 * present. Furthermore, data can only be drained from the FIFO by reading
788 * complete 32-bit words.
789 *
790 * With this in mind, a two-stage process is used to clear the FIFO:
791 *
792 * 1. Read any complete 32-bit words from the FIFO, as reported by the
793 * SPI_FAST_SEQ_STA register.
794 *
795 * 2. Mop up any remaining bytes. At this point, it is not known if there
796 * are 0, 1, 2, or 3 bytes in the FIFO. To handle all cases, a dummy FSM
797 * sequence is used to load one byte at a time, until a complete 32-bit
798 * word is formed; at most, 4 bytes will need to be loaded.
799 *
800 * [1] It is theoretically possible for the FIFO to contain an arbitrary number
801 * of bits. However, since there are no known use-cases that leave
802 * incomplete bytes in the FIFO, only words and bytes are considered here.
803 */
804static void stfsm_clear_fifo(struct stfsm *fsm)
805{
806 const struct stfsm_seq *seq = &stfsm_seq_load_fifo_byte;
807 uint32_t words, i;
808
809 /* 1. Clear any 32-bit words */
810 words = stfsm_fifo_available(fsm);
811 if (words) {
812 for (i = 0; i < words; i++)
813 readl(fsm->base + SPI_FAST_SEQ_DATA_REG);
814 dev_dbg(fsm->dev, "cleared %d words from FIFO\n", words);
815 }
816
817 /*
818 * 2. Clear any remaining bytes
819 * - Load the FIFO, one byte at a time, until a complete 32-bit word
820 * is available.
821 */
822 for (i = 0, words = 0; i < 4 && !words; i++) {
823 stfsm_load_seq(fsm, seq);
824 stfsm_wait_seq(fsm);
825 words = stfsm_fifo_available(fsm);
826 }
827
828 /* - A single word must be available now */
829 if (words != 1) {
830 dev_err(fsm->dev, "failed to clear bytes from the data FIFO\n");
831 return;
832 }
833
834 /* - Read the 32-bit word */
835 readl(fsm->base + SPI_FAST_SEQ_DATA_REG);
836
837 dev_dbg(fsm->dev, "cleared %d byte(s) from the data FIFO\n", 4 - i);
838}
839
775static int stfsm_write_fifo(struct stfsm *fsm, const uint32_t *buf, 840static int stfsm_write_fifo(struct stfsm *fsm, const uint32_t *buf,
776 uint32_t size) 841 uint32_t size)
777{ 842{
@@ -1521,11 +1586,11 @@ static int stfsm_write(struct stfsm *fsm, const uint8_t *buf,
1521 uint32_t size_lb; 1586 uint32_t size_lb;
1522 uint32_t size_mop; 1587 uint32_t size_mop;
1523 uint32_t tmp[4]; 1588 uint32_t tmp[4];
1589 uint32_t i;
1524 uint32_t page_buf[FLASH_PAGESIZE_32]; 1590 uint32_t page_buf[FLASH_PAGESIZE_32];
1525 uint8_t *t = (uint8_t *)&tmp; 1591 uint8_t *t = (uint8_t *)&tmp;
1526 const uint8_t *p; 1592 const uint8_t *p;
1527 int ret; 1593 int ret;
1528 int i;
1529 1594
1530 dev_dbg(fsm->dev, "writing %d bytes to 0x%08x\n", size, offset); 1595 dev_dbg(fsm->dev, "writing %d bytes to 0x%08x\n", size, offset);
1531 1596
@@ -1843,8 +1908,7 @@ static void stfsm_set_freq(struct stfsm *fsm, uint32_t spi_freq)
1843 uint32_t emi_freq; 1908 uint32_t emi_freq;
1844 uint32_t clk_div; 1909 uint32_t clk_div;
1845 1910
1846 /* TODO: Make this dynamic */ 1911 emi_freq = clk_get_rate(fsm->clk);
1847 emi_freq = STFSM_DEFAULT_EMI_FREQ;
1848 1912
1849 /* 1913 /*
1850 * Calculate clk_div - values between 2 and 128 1914 * Calculate clk_div - values between 2 and 128
@@ -1994,6 +2058,18 @@ static int stfsm_probe(struct platform_device *pdev)
1994 return PTR_ERR(fsm->base); 2058 return PTR_ERR(fsm->base);
1995 } 2059 }
1996 2060
2061 fsm->clk = devm_clk_get(&pdev->dev, NULL);
2062 if (IS_ERR(fsm->clk)) {
2063 dev_err(fsm->dev, "Couldn't find EMI clock.\n");
2064 return PTR_ERR(fsm->clk);
2065 }
2066
2067 ret = clk_prepare_enable(fsm->clk);
2068 if (ret) {
2069 dev_err(fsm->dev, "Failed to enable EMI clock.\n");
2070 return ret;
2071 }
2072
1997 mutex_init(&fsm->lock); 2073 mutex_init(&fsm->lock);
1998 2074
1999 ret = stfsm_init(fsm); 2075 ret = stfsm_init(fsm);
@@ -2058,6 +2134,28 @@ static int stfsm_remove(struct platform_device *pdev)
2058 return mtd_device_unregister(&fsm->mtd); 2134 return mtd_device_unregister(&fsm->mtd);
2059} 2135}
2060 2136
2137#ifdef CONFIG_PM_SLEEP
2138static int stfsm_suspend(struct device *dev)
2139{
2140 struct stfsm *fsm = dev_get_drvdata(dev);
2141
2142 clk_disable_unprepare(fsm->clk);
2143
2144 return 0;
2145}
2146
2147static int stfsm_resume(struct device *dev)
2148{
2149 struct stfsm *fsm = dev_get_drvdata(dev);
2150
2151 clk_prepare_enable(fsm->clk);
2152
2153 return 0;
2154}
2155#endif
2156
2157static SIMPLE_DEV_PM_OPS(stfsm_pm_ops, stfsm_suspend, stfsm_resume);
2158
2061static const struct of_device_id stfsm_match[] = { 2159static const struct of_device_id stfsm_match[] = {
2062 { .compatible = "st,spi-fsm", }, 2160 { .compatible = "st,spi-fsm", },
2063 {}, 2161 {},
@@ -2070,6 +2168,7 @@ static struct platform_driver stfsm_driver = {
2070 .driver = { 2168 .driver = {
2071 .name = "st-spi-fsm", 2169 .name = "st-spi-fsm",
2072 .of_match_table = stfsm_match, 2170 .of_match_table = stfsm_match,
2171 .pm = &stfsm_pm_ops,
2073 }, 2172 },
2074}; 2173};
2075module_platform_driver(stfsm_driver); 2174module_platform_driver(stfsm_driver);
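
Aside: the resume path in the PM hunk above discards the return value of clk_prepare_enable(), which can fail. A minimal sketch of the same pair with the error propagated; the example_* names are hypothetical and struct stfsm is the driver-local type shown earlier.

#include <linux/clk.h>
#include <linux/device.h>

static int example_suspend(struct device *dev)
{
        struct stfsm *fsm = dev_get_drvdata(dev);

        /* Gate the EMI clock while the controller is asleep. */
        clk_disable_unprepare(fsm->clk);
        return 0;
}

static int example_resume(struct device *dev)
{
        struct stfsm *fsm = dev_get_drvdata(dev);

        /* Re-enabling can fail, e.g. if the clock provider has not
         * resumed yet, so propagate the result. */
        return clk_prepare_enable(fsm->clk);
}
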
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index f35cd2081314..ff26e979b1a1 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -269,6 +269,16 @@ static int of_flash_probe(struct platform_device *dev)
269 info->list[i].mtd = obsolete_probe(dev, 269 info->list[i].mtd = obsolete_probe(dev,
270 &info->list[i].map); 270 &info->list[i].map);
271 } 271 }
272
273 /* Fall back to mapping region as ROM */
274 if (!info->list[i].mtd) {
275 dev_warn(&dev->dev,
276 "do_map_probe() failed for type %s\n",
277 probe_type);
278
279 info->list[i].mtd = do_map_probe("map_rom",
280 &info->list[i].map);
281 }
272 mtd_list[i] = info->list[i].mtd; 282 mtd_list[i] = info->list[i].mtd;
273 283
274 err = -ENXIO; 284 err = -ENXIO;
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 485ea751c7f9..bb4c14f83c75 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -45,8 +45,6 @@ struct mtdblk_dev {
45 enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state; 45 enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
46}; 46};
47 47
48static DEFINE_MUTEX(mtdblks_lock);
49
50/* 48/*
51 * Cache stuff... 49 * Cache stuff...
52 * 50 *
@@ -286,10 +284,8 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
286 284
287 pr_debug("mtdblock_open\n"); 285 pr_debug("mtdblock_open\n");
288 286
289 mutex_lock(&mtdblks_lock);
290 if (mtdblk->count) { 287 if (mtdblk->count) {
291 mtdblk->count++; 288 mtdblk->count++;
292 mutex_unlock(&mtdblks_lock);
293 return 0; 289 return 0;
294 } 290 }
295 291
@@ -302,8 +298,6 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
302 mtdblk->cache_data = NULL; 298 mtdblk->cache_data = NULL;
303 } 299 }
304 300
305 mutex_unlock(&mtdblks_lock);
306
307 pr_debug("ok\n"); 301 pr_debug("ok\n");
308 302
309 return 0; 303 return 0;
@@ -315,8 +309,6 @@ static void mtdblock_release(struct mtd_blktrans_dev *mbd)
315 309
316 pr_debug("mtdblock_release\n"); 310 pr_debug("mtdblock_release\n");
317 311
318 mutex_lock(&mtdblks_lock);
319
320 mutex_lock(&mtdblk->cache_mutex); 312 mutex_lock(&mtdblk->cache_mutex);
321 write_cached_data(mtdblk); 313 write_cached_data(mtdblk);
322 mutex_unlock(&mtdblk->cache_mutex); 314 mutex_unlock(&mtdblk->cache_mutex);
@@ -331,8 +323,6 @@ static void mtdblock_release(struct mtd_blktrans_dev *mbd)
331 vfree(mtdblk->cache_data); 323 vfree(mtdblk->cache_data);
332 } 324 }
333 325
334 mutex_unlock(&mtdblks_lock);
335
336 pr_debug("ok\n"); 326 pr_debug("ok\n");
337} 327}
338 328
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index eacc3aac7327..239a8c806b67 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -311,7 +311,8 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
311 devops.len = subdev->size - to; 311 devops.len = subdev->size - to;
312 312
313 err = mtd_write_oob(subdev, to, &devops); 313 err = mtd_write_oob(subdev, to, &devops);
314 ops->retlen += devops.oobretlen; 314 ops->retlen += devops.retlen;
315 ops->oobretlen += devops.oobretlen;
315 if (err) 316 if (err)
316 return err; 317 return err;
317 318
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 0ec4d6ea1e4b..11883bd26d9d 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -37,6 +37,7 @@
37#include <linux/backing-dev.h> 37#include <linux/backing-dev.h>
38#include <linux/gfp.h> 38#include <linux/gfp.h>
39#include <linux/slab.h> 39#include <linux/slab.h>
40#include <linux/reboot.h>
40 41
41#include <linux/mtd/mtd.h> 42#include <linux/mtd/mtd.h>
42#include <linux/mtd/partitions.h> 43#include <linux/mtd/partitions.h>
@@ -356,6 +357,17 @@ unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
356EXPORT_SYMBOL_GPL(mtd_mmap_capabilities); 357EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
357#endif 358#endif
358 359
360static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
361 void *cmd)
362{
363 struct mtd_info *mtd;
364
365 mtd = container_of(n, struct mtd_info, reboot_notifier);
366 mtd->_reboot(mtd);
367
368 return NOTIFY_DONE;
369}
370
359/** 371/**
360 * add_mtd_device - register an MTD device 372 * add_mtd_device - register an MTD device
361 * @mtd: pointer to new MTD device info structure 373 * @mtd: pointer to new MTD device info structure
@@ -544,6 +556,19 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
544 err = -ENODEV; 556 err = -ENODEV;
545 } 557 }
546 558
559 /*
560 * FIXME: some drivers unfortunately call this function more than once.
561 * So we have to check if we've already assigned the reboot notifier.
562 *
563 * Generally, we can make multiple calls work for most cases, but it
564 * does cause problems with parse_mtd_partitions() above (e.g.,
565 * cmdlineparts will register partitions more than once).
566 */
567 if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
568 mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
569 register_reboot_notifier(&mtd->reboot_notifier);
570 }
571
547 return err; 572 return err;
548} 573}
549EXPORT_SYMBOL_GPL(mtd_device_parse_register); 574EXPORT_SYMBOL_GPL(mtd_device_parse_register);
@@ -558,6 +583,9 @@ int mtd_device_unregister(struct mtd_info *master)
558{ 583{
559 int err; 584 int err;
560 585
586 if (master->_reboot)
587 unregister_reboot_notifier(&master->reboot_notifier);
588
561 err = del_mtd_partitions(master); 589 err = del_mtd_partitions(master);
562 if (err) 590 if (err)
563 return err; 591 return err;
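
Aside: the consumer side of the new hook, as a hedged sketch (example_* names are hypothetical). A driver only has to populate _reboot; mtd_device_parse_register() then wires the notifier, and only once, per the FIXME in the hunk above.

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Hypothetical callback: finish the in-flight program/erase and block
 * new operations so a reboot cannot interrupt the flash mid-write. */
static void example_mtd_reboot(struct mtd_info *mtd)
{
        /* driver-specific quiesce, e.g. take the controller lock */
}

static int example_register(struct mtd_info *mtd)
{
        mtd->_reboot = example_mtd_reboot;

        /* Registers mtd->reboot_notifier on the first call only. */
        return mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
}
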
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 7d0150d20432..5b76a173cd95 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -421,7 +421,7 @@ config MTD_NAND_ORION
421 421
422config MTD_NAND_FSL_ELBC 422config MTD_NAND_FSL_ELBC
423 tristate "NAND support for Freescale eLBC controllers" 423 tristate "NAND support for Freescale eLBC controllers"
424 depends on PPC_OF 424 depends on PPC
425 select FSL_LBC 425 select FSL_LBC
426 help 426 help
427 Various Freescale chips, including the 8313, include a NAND Flash 427 Various Freescale chips, including the 8313, include a NAND Flash
@@ -524,4 +524,9 @@ config MTD_NAND_SUNXI
524 help 524 help
525 Enables support for NAND Flash chips on Allwinner SoCs. 525 Enables support for NAND Flash chips on Allwinner SoCs.
526 526
527config MTD_NAND_HISI504
528 tristate "Support for NAND controller on Hisilicon SoC Hip04"
529 help
530 Enables support for NAND controller on Hisilicon SoC Hip04.
531
527endif # MTD_NAND 532endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index bd38f21d2e28..582bbd05aff7 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -51,5 +51,6 @@ obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
51obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o 51obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
52obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/ 52obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
53obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o 53obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
54obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
54 55
55nand-objs := nand_base.o nand_bbt.o nand_timings.o 56nand-objs := nand_base.o nand_bbt.o nand_timings.o
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index f1d555cfb332..842f8fe91b56 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -183,7 +183,7 @@ static int ams_delta_init(struct platform_device *pdev)
183 return -ENXIO; 183 return -ENXIO;
184 184
185 /* Allocate memory for MTD device structure and private data */ 185 /* Allocate memory for MTD device structure and private data */
186 ams_delta_mtd = kmalloc(sizeof(struct mtd_info) + 186 ams_delta_mtd = kzalloc(sizeof(struct mtd_info) +
187 sizeof(struct nand_chip), GFP_KERNEL); 187 sizeof(struct nand_chip), GFP_KERNEL);
188 if (!ams_delta_mtd) { 188 if (!ams_delta_mtd) {
189 printk (KERN_WARNING "Unable to allocate E3 NAND MTD device structure.\n"); 189 printk (KERN_WARNING "Unable to allocate E3 NAND MTD device structure.\n");
@@ -196,10 +196,6 @@ static int ams_delta_init(struct platform_device *pdev)
196 /* Get pointer to private data */ 196 /* Get pointer to private data */
197 this = (struct nand_chip *) (&ams_delta_mtd[1]); 197 this = (struct nand_chip *) (&ams_delta_mtd[1]);
198 198
199 /* Initialize structures */
200 memset(ams_delta_mtd, 0, sizeof(struct mtd_info));
201 memset(this, 0, sizeof(struct nand_chip));
202
203 /* Link the private data with the MTD structure */ 199 /* Link the private data with the MTD structure */
204 ams_delta_mtd->priv = this; 200 ams_delta_mtd->priv = this;
205 201
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index a345e7b2463a..d93c849b70b5 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -63,6 +63,10 @@ module_param(on_flash_bbt, int, 0);
63#include "atmel_nand_ecc.h" /* Hardware ECC registers */ 63#include "atmel_nand_ecc.h" /* Hardware ECC registers */
64#include "atmel_nand_nfc.h" /* Nand Flash Controller definition */ 64#include "atmel_nand_nfc.h" /* Nand Flash Controller definition */
65 65
66struct atmel_nand_caps {
67 bool pmecc_correct_erase_page;
68};
69
66/* oob layout for large page size 70/* oob layout for large page size
67 * bad block info is on bytes 0 and 1 71 * bad block info is on bytes 0 and 1
68 * the bytes have to be consecutive to avoid 72 * the bytes have to be consecutive to avoid
@@ -124,6 +128,7 @@ struct atmel_nand_host {
124 128
125 struct atmel_nfc *nfc; 129 struct atmel_nfc *nfc;
126 130
131 struct atmel_nand_caps *caps;
127 bool has_pmecc; 132 bool has_pmecc;
128 u8 pmecc_corr_cap; 133 u8 pmecc_corr_cap;
129 u16 pmecc_sector_size; 134 u16 pmecc_sector_size;
@@ -847,7 +852,11 @@ static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
847 struct atmel_nand_host *host = nand_chip->priv; 852 struct atmel_nand_host *host = nand_chip->priv;
848 int i, err_nbr; 853 int i, err_nbr;
849 uint8_t *buf_pos; 854 uint8_t *buf_pos;
850 int total_err = 0; 855 int max_bitflips = 0;
856
857 /* If it can correct bitflips from an erased page, do the normal check */
858 if (host->caps->pmecc_correct_erase_page)
859 goto normal_check;
851 860
852 for (i = 0; i < nand_chip->ecc.total; i++) 861 for (i = 0; i < nand_chip->ecc.total; i++)
853 if (ecc[i] != 0xff) 862 if (ecc[i] != 0xff)
@@ -874,13 +883,13 @@ normal_check:
874 pmecc_correct_data(mtd, buf_pos, ecc, i, 883 pmecc_correct_data(mtd, buf_pos, ecc, i,
875 nand_chip->ecc.bytes, err_nbr); 884 nand_chip->ecc.bytes, err_nbr);
876 mtd->ecc_stats.corrected += err_nbr; 885 mtd->ecc_stats.corrected += err_nbr;
877 total_err += err_nbr; 886 max_bitflips = max_t(int, max_bitflips, err_nbr);
878 } 887 }
879 } 888 }
880 pmecc_stat >>= 1; 889 pmecc_stat >>= 1;
881 } 890 }
882 891
883 return total_err; 892 return max_bitflips;
884} 893}
885 894
886static void pmecc_enable(struct atmel_nand_host *host, int ecc_op) 895static void pmecc_enable(struct atmel_nand_host *host, int ecc_op)
@@ -1474,6 +1483,8 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
1474 ecc_writel(host->ecc, CR, ATMEL_ECC_RST); 1483 ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
1475} 1484}
1476 1485
1486static const struct of_device_id atmel_nand_dt_ids[];
1487
1477static int atmel_of_init_port(struct atmel_nand_host *host, 1488static int atmel_of_init_port(struct atmel_nand_host *host,
1478 struct device_node *np) 1489 struct device_node *np)
1479{ 1490{
@@ -1483,6 +1494,9 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
1483 struct atmel_nand_data *board = &host->board; 1494 struct atmel_nand_data *board = &host->board;
1484 enum of_gpio_flags flags = 0; 1495 enum of_gpio_flags flags = 0;
1485 1496
1497 host->caps = (struct atmel_nand_caps *)
1498 of_match_device(atmel_nand_dt_ids, host->dev)->data;
1499
1486 if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) { 1500 if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
1487 if (val >= 32) { 1501 if (val >= 32) {
1488 dev_err(host->dev, "invalid addr-offset %u\n", val); 1502 dev_err(host->dev, "invalid addr-offset %u\n", val);
@@ -2288,8 +2302,17 @@ static int atmel_nand_remove(struct platform_device *pdev)
2288 return 0; 2302 return 0;
2289} 2303}
2290 2304
2305static struct atmel_nand_caps at91rm9200_caps = {
2306 .pmecc_correct_erase_page = false,
2307};
2308
2309static struct atmel_nand_caps sama5d4_caps = {
2310 .pmecc_correct_erase_page = true,
2311};
2312
2291static const struct of_device_id atmel_nand_dt_ids[] = { 2313static const struct of_device_id atmel_nand_dt_ids[] = {
2292 { .compatible = "atmel,at91rm9200-nand" }, 2314 { .compatible = "atmel,at91rm9200-nand", .data = &at91rm9200_caps },
2315 { .compatible = "atmel,sama5d4-nand", .data = &sama5d4_caps },
2293 { /* sentinel */ } 2316 { /* sentinel */ }
2294}; 2317};
2295 2318
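
Aside on the pmecc_correction() return-value change above: MTD's convention is that ecc.read_page returns the maximum number of bitflips seen in any single ECC step (the core compares that against mtd->bitflip_threshold), while totals still accumulate into ecc_stats.corrected. A sketch of the convention with hypothetical names:

/* flips_per_step[] holds the corrected-bit count of each ECC step of
 * one page; returns what an ecc.read_page implementation should. */
static int example_page_bitflip_stats(const int *flips_per_step,
                                      int nsteps,
                                      unsigned int *corrected_total)
{
        int i, max_bitflips = 0;

        for (i = 0; i < nsteps; i++) {
                *corrected_total += flips_per_step[i]; /* ecc_stats.corrected */
                if (flips_per_step[i] > max_bitflips)
                        max_bitflips = flips_per_step[i];
        }
        return max_bitflips; /* checked against mtd->bitflip_threshold */
}
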
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index b3b7ca1bafb8..f44c6061536a 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1041,7 +1041,7 @@ static void denali_setup_dma(struct denali_nand_info *denali, int op)
1041 index_addr(denali, mode | ((addr >> 16) << 8), 0x2200); 1041 index_addr(denali, mode | ((addr >> 16) << 8), 0x2200);
1042 1042
1043 /* 3. set memory low address bits 23:8 */ 1043 /* 3. set memory low address bits 23:8 */
1044 index_addr(denali, mode | ((addr & 0xff) << 8), 0x2300); 1044 index_addr(denali, mode | ((addr & 0xffff) << 8), 0x2300);
1045 1045
1046 /* 4. interrupt when complete, burst len = 64 bytes */ 1046 /* 4. interrupt when complete, burst len = 64 bytes */
1047 index_addr(denali, mode | 0x14000, 0x2400); 1047 index_addr(denali, mode | 0x14000, 0x2400);
@@ -1328,35 +1328,6 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
1328 break; 1328 break;
1329 } 1329 }
1330} 1330}
1331
1332/* stubs for ECC functions not used by the NAND core */
1333static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
1334 uint8_t *ecc_code)
1335{
1336 struct denali_nand_info *denali = mtd_to_denali(mtd);
1337
1338 dev_err(denali->dev, "denali_ecc_calculate called unexpectedly\n");
1339 BUG();
1340 return -EIO;
1341}
1342
1343static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
1344 uint8_t *read_ecc, uint8_t *calc_ecc)
1345{
1346 struct denali_nand_info *denali = mtd_to_denali(mtd);
1347
1348 dev_err(denali->dev, "denali_ecc_correct called unexpectedly\n");
1349 BUG();
1350 return -EIO;
1351}
1352
1353static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
1354{
1355 struct denali_nand_info *denali = mtd_to_denali(mtd);
1356
1357 dev_err(denali->dev, "denali_ecc_hwctl called unexpectedly\n");
1358 BUG();
1359}
1360/* end NAND core entry points */ 1331/* end NAND core entry points */
1361 1332
1362/* Initialization code to bring the device up to a known good state */ 1333/* Initialization code to bring the device up to a known good state */
@@ -1609,15 +1580,6 @@ int denali_init(struct denali_nand_info *denali)
1609 denali->totalblks = denali->mtd.size >> denali->nand.phys_erase_shift; 1580 denali->totalblks = denali->mtd.size >> denali->nand.phys_erase_shift;
1610 denali->blksperchip = denali->totalblks / denali->nand.numchips; 1581 denali->blksperchip = denali->totalblks / denali->nand.numchips;
1611 1582
1612 /*
1613 * These functions are required by the NAND core framework, otherwise,
1614 * the NAND core will assert. However, we don't need them, so we'll stub
1615 * them out.
1616 */
1617 denali->nand.ecc.calculate = denali_ecc_calculate;
1618 denali->nand.ecc.correct = denali_ecc_correct;
1619 denali->nand.ecc.hwctl = denali_ecc_hwctl;
1620
1621 /* override the default read operations */ 1583 /* override the default read operations */
1622 denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum; 1584 denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum;
1623 denali->nand.ecc.read_page = denali_read_page; 1585 denali->nand.ecc.read_page = denali_read_page;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 4f3851a24bb2..33f3c3c54dbc 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1294,14 +1294,6 @@ exit_auxiliary:
1294 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an 1294 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
1295 * ECC-based or raw view of the page is implicit in which function it calls 1295 * ECC-based or raw view of the page is implicit in which function it calls
1296 * (there is a similar pair of ECC-based/raw functions for writing). 1296 * (there is a similar pair of ECC-based/raw functions for writing).
1297 *
1298 * FIXME: The following paragraph is incorrect, now that there exist
1299 * ecc.read_oob_raw and ecc.write_oob_raw functions.
1300 *
1301 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
1302 * ECC-based/raw functions for reading or writing the OOB. The fact that the
1303 * caller wants an ECC-based or raw view of the page is not propagated down to
1304 * this driver.
1305 */ 1297 */
1306static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 1298static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1307 int page) 1299 int page)
@@ -2029,7 +2021,6 @@ static int gpmi_nand_probe(struct platform_device *pdev)
2029exit_nfc_init: 2021exit_nfc_init:
2030 release_resources(this); 2022 release_resources(this);
2031exit_acquire_resources: 2023exit_acquire_resources:
2032 dev_err(this->dev, "driver registration failed: %d\n", ret);
2033 2024
2034 return ret; 2025 return ret;
2035} 2026}
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
new file mode 100644
index 000000000000..289ad3ac3e80
--- /dev/null
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -0,0 +1,891 @@
1/*
2 * Hisilicon NAND Flash controller driver
3 *
4 * Copyright © 2012-2014 HiSilicon Technologies Co., Ltd.
5 * http://www.hisilicon.com
6 *
7 * Author: Zhou Wang <wangzhou.bry@gmail.com>
8 * The initial developer of the original code is Zhiyong Cai
9 * <caizhiyong@huawei.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 */
21#include <linux/of.h>
22#include <linux/of_mtd.h>
23#include <linux/mtd/mtd.h>
24#include <linux/sizes.h>
25#include <linux/clk.h>
26#include <linux/slab.h>
27#include <linux/module.h>
28#include <linux/delay.h>
29#include <linux/interrupt.h>
30#include <linux/mtd/nand.h>
31#include <linux/dma-mapping.h>
32#include <linux/platform_device.h>
33#include <linux/mtd/partitions.h>
34
35#define HINFC504_MAX_CHIP (4)
36#define HINFC504_W_LATCH (5)
37#define HINFC504_R_LATCH (7)
38#define HINFC504_RW_LATCH (3)
39
40#define HINFC504_NFC_TIMEOUT (2 * HZ)
41#define HINFC504_NFC_PM_TIMEOUT (1 * HZ)
42#define HINFC504_NFC_DMA_TIMEOUT (5 * HZ)
43#define HINFC504_CHIP_DELAY (25)
44
45#define HINFC504_REG_BASE_ADDRESS_LEN (0x100)
46#define HINFC504_BUFFER_BASE_ADDRESS_LEN (2048 + 128)
47
48#define HINFC504_ADDR_CYCLE_MASK 0x4
49
50#define HINFC504_CON 0x00
51#define HINFC504_CON_OP_MODE_NORMAL BIT(0)
52#define HINFC504_CON_PAGESIZE_SHIFT (1)
53#define HINFC504_CON_PAGESIZE_MASK (0x07)
54#define HINFC504_CON_BUS_WIDTH BIT(4)
55#define HINFC504_CON_READY_BUSY_SEL BIT(8)
56#define HINFC504_CON_ECCTYPE_SHIFT (9)
57#define HINFC504_CON_ECCTYPE_MASK (0x07)
58
59#define HINFC504_PWIDTH 0x04
60#define SET_HINFC504_PWIDTH(_w_lcnt, _r_lcnt, _rw_hcnt) \
61 ((_w_lcnt) | (((_r_lcnt) & 0x0F) << 4) | (((_rw_hcnt) & 0x0F) << 8))
62
63#define HINFC504_CMD 0x0C
64#define HINFC504_ADDRL 0x10
65#define HINFC504_ADDRH 0x14
66#define HINFC504_DATA_NUM 0x18
67
68#define HINFC504_OP 0x1C
69#define HINFC504_OP_READ_DATA_EN BIT(1)
70#define HINFC504_OP_WAIT_READY_EN BIT(2)
71#define HINFC504_OP_CMD2_EN BIT(3)
72#define HINFC504_OP_WRITE_DATA_EN BIT(4)
73#define HINFC504_OP_ADDR_EN BIT(5)
74#define HINFC504_OP_CMD1_EN BIT(6)
75#define HINFC504_OP_NF_CS_SHIFT (7)
76#define HINFC504_OP_NF_CS_MASK (3)
77#define HINFC504_OP_ADDR_CYCLE_SHIFT (9)
78#define HINFC504_OP_ADDR_CYCLE_MASK (7)
79
80#define HINFC504_STATUS 0x20
81#define HINFC504_READY BIT(0)
82
83#define HINFC504_INTEN 0x24
84#define HINFC504_INTEN_DMA BIT(9)
85#define HINFC504_INTEN_UE BIT(6)
86#define HINFC504_INTEN_CE BIT(5)
87
88#define HINFC504_INTS 0x28
89#define HINFC504_INTS_DMA BIT(9)
90#define HINFC504_INTS_UE BIT(6)
91#define HINFC504_INTS_CE BIT(5)
92
93#define HINFC504_INTCLR 0x2C
94#define HINFC504_INTCLR_DMA BIT(9)
95#define HINFC504_INTCLR_UE BIT(6)
96#define HINFC504_INTCLR_CE BIT(5)
97
98#define HINFC504_ECC_STATUS 0x5C
99#define HINFC504_ECC_16_BIT_SHIFT 12
100
101#define HINFC504_DMA_CTRL 0x60
102#define HINFC504_DMA_CTRL_DMA_START BIT(0)
103#define HINFC504_DMA_CTRL_WE BIT(1)
104#define HINFC504_DMA_CTRL_DATA_AREA_EN BIT(2)
105#define HINFC504_DMA_CTRL_OOB_AREA_EN BIT(3)
106#define HINFC504_DMA_CTRL_BURST4_EN BIT(4)
107#define HINFC504_DMA_CTRL_BURST8_EN BIT(5)
108#define HINFC504_DMA_CTRL_BURST16_EN BIT(6)
109#define HINFC504_DMA_CTRL_ADDR_NUM_SHIFT (7)
110#define HINFC504_DMA_CTRL_ADDR_NUM_MASK (1)
111#define HINFC504_DMA_CTRL_CS_SHIFT (8)
112#define HINFC504_DMA_CTRL_CS_MASK (0x03)
113
114#define HINFC504_DMA_ADDR_DATA 0x64
115#define HINFC504_DMA_ADDR_OOB 0x68
116
117#define HINFC504_DMA_LEN 0x6C
118#define HINFC504_DMA_LEN_OOB_SHIFT (16)
119#define HINFC504_DMA_LEN_OOB_MASK (0xFFF)
120
121#define HINFC504_DMA_PARA 0x70
122#define HINFC504_DMA_PARA_DATA_RW_EN BIT(0)
123#define HINFC504_DMA_PARA_OOB_RW_EN BIT(1)
124#define HINFC504_DMA_PARA_DATA_EDC_EN BIT(2)
125#define HINFC504_DMA_PARA_OOB_EDC_EN BIT(3)
126#define HINFC504_DMA_PARA_DATA_ECC_EN BIT(4)
127#define HINFC504_DMA_PARA_OOB_ECC_EN BIT(5)
128
129#define HINFC_VERSION 0x74
130#define HINFC504_LOG_READ_ADDR 0x7C
131#define HINFC504_LOG_READ_LEN 0x80
132
133#define HINFC504_NANDINFO_LEN 0x10
134
135struct hinfc_host {
136 struct nand_chip chip;
137 struct mtd_info mtd;
138 struct device *dev;
139 void __iomem *iobase;
140 void __iomem *mmio;
141 struct completion cmd_complete;
142 unsigned int offset;
143 unsigned int command;
144 int chipselect;
145 unsigned int addr_cycle;
146 u32 addr_value[2];
147 u32 cache_addr_value[2];
148 char *buffer;
149 dma_addr_t dma_buffer;
150 dma_addr_t dma_oob;
151 int version;
152 unsigned int irq_status; /* interrupt status */
153};
154
155static inline unsigned int hinfc_read(struct hinfc_host *host, unsigned int reg)
156{
157 return readl(host->iobase + reg);
158}
159
160static inline void hinfc_write(struct hinfc_host *host, unsigned int value,
161 unsigned int reg)
162{
163 writel(value, host->iobase + reg);
164}
165
166static void wait_controller_finished(struct hinfc_host *host)
167{
168 unsigned long timeout = jiffies + HINFC504_NFC_TIMEOUT;
169 int val;
170
171 while (time_before(jiffies, timeout)) {
172 val = hinfc_read(host, HINFC504_STATUS);
173 if (host->command == NAND_CMD_ERASE2) {
174 /* nfc is ready */
175 while (!(val & HINFC504_READY)) {
176 usleep_range(500, 1000);
177 val = hinfc_read(host, HINFC504_STATUS);
178 }
179 return;
180 }
181
182 if (val & HINFC504_READY)
183 return;
184 }
185
186 /* command wait timed out */
187 dev_err(host->dev, "Timeout waiting for NAND controller to execute command.\n");
188}
189
190static void hisi_nfc_dma_transfer(struct hinfc_host *host, int todev)
191{
192 struct mtd_info *mtd = &host->mtd;
193 struct nand_chip *chip = mtd->priv;
194 unsigned long val;
195 int ret;
196
197 hinfc_write(host, host->dma_buffer, HINFC504_DMA_ADDR_DATA);
198 hinfc_write(host, host->dma_oob, HINFC504_DMA_ADDR_OOB);
199
200 if (chip->ecc.mode == NAND_ECC_NONE) {
201 hinfc_write(host, ((mtd->oobsize & HINFC504_DMA_LEN_OOB_MASK)
202 << HINFC504_DMA_LEN_OOB_SHIFT), HINFC504_DMA_LEN);
203
204 hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN
205 | HINFC504_DMA_PARA_OOB_RW_EN, HINFC504_DMA_PARA);
206 } else {
207 if (host->command == NAND_CMD_READOOB)
208 hinfc_write(host, HINFC504_DMA_PARA_OOB_RW_EN
209 | HINFC504_DMA_PARA_OOB_EDC_EN
210 | HINFC504_DMA_PARA_OOB_ECC_EN, HINFC504_DMA_PARA);
211 else
212 hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN
213 | HINFC504_DMA_PARA_OOB_RW_EN
214 | HINFC504_DMA_PARA_DATA_EDC_EN
215 | HINFC504_DMA_PARA_OOB_EDC_EN
216 | HINFC504_DMA_PARA_DATA_ECC_EN
217 | HINFC504_DMA_PARA_OOB_ECC_EN, HINFC504_DMA_PARA);
218
219 }
220
221 val = (HINFC504_DMA_CTRL_DMA_START | HINFC504_DMA_CTRL_BURST4_EN
222 | HINFC504_DMA_CTRL_BURST8_EN | HINFC504_DMA_CTRL_BURST16_EN
223 | HINFC504_DMA_CTRL_DATA_AREA_EN | HINFC504_DMA_CTRL_OOB_AREA_EN
224 | ((host->addr_cycle == 4 ? 1 : 0)
225 << HINFC504_DMA_CTRL_ADDR_NUM_SHIFT)
226 | ((host->chipselect & HINFC504_DMA_CTRL_CS_MASK)
227 << HINFC504_DMA_CTRL_CS_SHIFT));
228
229 if (todev)
230 val |= HINFC504_DMA_CTRL_WE;
231
232 init_completion(&host->cmd_complete);
233
234 hinfc_write(host, val, HINFC504_DMA_CTRL);
235 ret = wait_for_completion_timeout(&host->cmd_complete,
236 HINFC504_NFC_DMA_TIMEOUT);
237
238 if (!ret) {
239 dev_err(host->dev, "DMA operation(irq) timeout!\n");
240 /* sanity check */
241 val = hinfc_read(host, HINFC504_DMA_CTRL);
242 if (!(val & HINFC504_DMA_CTRL_DMA_START))
243 dev_err(host->dev, "DMA is already done but without irq ACK!\n");
244 else
245 dev_err(host->dev, "DMA really timed out!\n");
246 }
247}
248
249static int hisi_nfc_send_cmd_pageprog(struct hinfc_host *host)
250{
251 host->addr_value[0] &= 0xffff0000;
252
253 hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
254 hinfc_write(host, host->addr_value[1], HINFC504_ADDRH);
255 hinfc_write(host, NAND_CMD_PAGEPROG << 8 | NAND_CMD_SEQIN,
256 HINFC504_CMD);
257
258 hisi_nfc_dma_transfer(host, 1);
259
260 return 0;
261}
262
263static int hisi_nfc_send_cmd_readstart(struct hinfc_host *host)
264{
265 struct mtd_info *mtd = &host->mtd;
266
267 if ((host->addr_value[0] == host->cache_addr_value[0]) &&
268 (host->addr_value[1] == host->cache_addr_value[1]))
269 return 0;
270
271 host->addr_value[0] &= 0xffff0000;
272
273 hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
274 hinfc_write(host, host->addr_value[1], HINFC504_ADDRH);
275 hinfc_write(host, NAND_CMD_READSTART << 8 | NAND_CMD_READ0,
276 HINFC504_CMD);
277
278 hinfc_write(host, 0, HINFC504_LOG_READ_ADDR);
279 hinfc_write(host, mtd->writesize + mtd->oobsize,
280 HINFC504_LOG_READ_LEN);
281
282 hisi_nfc_dma_transfer(host, 0);
283
284 host->cache_addr_value[0] = host->addr_value[0];
285 host->cache_addr_value[1] = host->addr_value[1];
286
287 return 0;
288}
289
290static int hisi_nfc_send_cmd_erase(struct hinfc_host *host)
291{
292 hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
293 hinfc_write(host, (NAND_CMD_ERASE2 << 8) | NAND_CMD_ERASE1,
294 HINFC504_CMD);
295
296 hinfc_write(host, HINFC504_OP_WAIT_READY_EN
297 | HINFC504_OP_CMD2_EN
298 | HINFC504_OP_CMD1_EN
299 | HINFC504_OP_ADDR_EN
300 | ((host->chipselect & HINFC504_OP_NF_CS_MASK)
301 << HINFC504_OP_NF_CS_SHIFT)
302 | ((host->addr_cycle & HINFC504_OP_ADDR_CYCLE_MASK)
303 << HINFC504_OP_ADDR_CYCLE_SHIFT),
304 HINFC504_OP);
305
306 wait_controller_finished(host);
307
308 return 0;
309}
310
311static int hisi_nfc_send_cmd_readid(struct hinfc_host *host)
312{
313 hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM);
314 hinfc_write(host, NAND_CMD_READID, HINFC504_CMD);
315 hinfc_write(host, 0, HINFC504_ADDRL);
316
317 hinfc_write(host, HINFC504_OP_CMD1_EN | HINFC504_OP_ADDR_EN
318 | HINFC504_OP_READ_DATA_EN
319 | ((host->chipselect & HINFC504_OP_NF_CS_MASK)
320 << HINFC504_OP_NF_CS_SHIFT)
321 | 1 << HINFC504_OP_ADDR_CYCLE_SHIFT, HINFC504_OP);
322
323 wait_controller_finished(host);
324
325 return 0;
326}
327
328static int hisi_nfc_send_cmd_status(struct hinfc_host *host)
329{
330 hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM);
331 hinfc_write(host, NAND_CMD_STATUS, HINFC504_CMD);
332 hinfc_write(host, HINFC504_OP_CMD1_EN
333 | HINFC504_OP_READ_DATA_EN
334 | ((host->chipselect & HINFC504_OP_NF_CS_MASK)
335 << HINFC504_OP_NF_CS_SHIFT),
336 HINFC504_OP);
337
338 wait_controller_finished(host);
339
340 return 0;
341}
342
343static int hisi_nfc_send_cmd_reset(struct hinfc_host *host, int chipselect)
344{
345 hinfc_write(host, NAND_CMD_RESET, HINFC504_CMD);
346
347 hinfc_write(host, HINFC504_OP_CMD1_EN
348 | ((chipselect & HINFC504_OP_NF_CS_MASK)
349 << HINFC504_OP_NF_CS_SHIFT)
350 | HINFC504_OP_WAIT_READY_EN,
351 HINFC504_OP);
352
353 wait_controller_finished(host);
354
355 return 0;
356}
357
358static void hisi_nfc_select_chip(struct mtd_info *mtd, int chipselect)
359{
360 struct nand_chip *chip = mtd->priv;
361 struct hinfc_host *host = chip->priv;
362
363 if (chipselect < 0)
364 return;
365
366 host->chipselect = chipselect;
367}
368
369static uint8_t hisi_nfc_read_byte(struct mtd_info *mtd)
370{
371 struct nand_chip *chip = mtd->priv;
372 struct hinfc_host *host = chip->priv;
373
374 if (host->command == NAND_CMD_STATUS)
375 return *(uint8_t *)(host->mmio);
376
377 host->offset++;
378
379 if (host->command == NAND_CMD_READID)
380 return *(uint8_t *)(host->mmio + host->offset - 1);
381
382 return *(uint8_t *)(host->buffer + host->offset - 1);
383}
384
385static u16 hisi_nfc_read_word(struct mtd_info *mtd)
386{
387 struct nand_chip *chip = mtd->priv;
388 struct hinfc_host *host = chip->priv;
389
390 host->offset += 2;
391 return *(u16 *)(host->buffer + host->offset - 2);
392}
393
394static void
395hisi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
396{
397 struct nand_chip *chip = mtd->priv;
398 struct hinfc_host *host = chip->priv;
399
400 memcpy(host->buffer + host->offset, buf, len);
401 host->offset += len;
402}
403
404static void hisi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
405{
406 struct nand_chip *chip = mtd->priv;
407 struct hinfc_host *host = chip->priv;
408
409 memcpy(buf, host->buffer + host->offset, len);
410 host->offset += len;
411}
412
413static void set_addr(struct mtd_info *mtd, int column, int page_addr)
414{
415 struct nand_chip *chip = mtd->priv;
416 struct hinfc_host *host = chip->priv;
417 unsigned int command = host->command;
418
419 host->addr_cycle = 0;
420 host->addr_value[0] = 0;
421 host->addr_value[1] = 0;
422
423 /* Serially input address */
424 if (column != -1) {
425 /* Adjust columns for 16 bit buswidth */
426 if (chip->options & NAND_BUSWIDTH_16 &&
427 !nand_opcode_8bits(command))
428 column >>= 1;
429
430 host->addr_value[0] = column & 0xffff;
431 host->addr_cycle = 2;
432 }
433 if (page_addr != -1) {
434 host->addr_value[0] |= (page_addr & 0xffff)
435 << (host->addr_cycle * 8);
436 host->addr_cycle += 2;
437 /* One more address cycle for devices > 128MiB */
438 if (chip->chipsize > (128 << 20)) {
439 host->addr_cycle += 1;
440 if (host->command == NAND_CMD_ERASE1)
441 host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16;
442 else
443 host->addr_value[1] |= ((page_addr >> 16) & 0xff);
444 }
445 }
446}
447
448static void hisi_nfc_cmdfunc(struct mtd_info *mtd, unsigned command, int column,
449 int page_addr)
450{
451 struct nand_chip *chip = mtd->priv;
452 struct hinfc_host *host = chip->priv;
453 int is_cache_invalid = 1;
454 unsigned int flag = 0;
455
456 host->command = command;
457
458 switch (command) {
459 case NAND_CMD_READ0:
460 case NAND_CMD_READOOB:
461 if (command == NAND_CMD_READ0)
462 host->offset = column;
463 else
464 host->offset = column + mtd->writesize;
465
466 is_cache_invalid = 0;
467 set_addr(mtd, column, page_addr);
468 hisi_nfc_send_cmd_readstart(host);
469 break;
470
471 case NAND_CMD_SEQIN:
472 host->offset = column;
473 set_addr(mtd, column, page_addr);
474 break;
475
476 case NAND_CMD_ERASE1:
477 set_addr(mtd, column, page_addr);
478 break;
479
480 case NAND_CMD_PAGEPROG:
481 hisi_nfc_send_cmd_pageprog(host);
482 break;
483
484 case NAND_CMD_ERASE2:
485 hisi_nfc_send_cmd_erase(host);
486 break;
487
488 case NAND_CMD_READID:
489 host->offset = column;
490 memset(host->mmio, 0, 0x10);
491 hisi_nfc_send_cmd_readid(host);
492 break;
493
494 case NAND_CMD_STATUS:
495 flag = hinfc_read(host, HINFC504_CON);
496 if (chip->ecc.mode == NAND_ECC_HW)
497 hinfc_write(host,
498 flag & ~(HINFC504_CON_ECCTYPE_MASK <<
499 HINFC504_CON_ECCTYPE_SHIFT), HINFC504_CON);
500
501 host->offset = 0;
502 memset(host->mmio, 0, 0x10);
503 hisi_nfc_send_cmd_status(host);
504 hinfc_write(host, flag, HINFC504_CON);
505 break;
506
507 case NAND_CMD_RESET:
508 hisi_nfc_send_cmd_reset(host, host->chipselect);
509 break;
510
511 default:
512 dev_err(host->dev, "Error: unsupported cmd(cmd=%x, col=%x, page=%x)\n",
513 command, column, page_addr);
514 }
515
516 if (is_cache_invalid) {
517 host->cache_addr_value[0] = ~0;
518 host->cache_addr_value[1] = ~0;
519 }
520}
521
522static irqreturn_t hinfc_irq_handle(int irq, void *devid)
523{
524 struct hinfc_host *host = devid;
525 unsigned int flag;
526
527 flag = hinfc_read(host, HINFC504_INTS);
528 /* store interrupts state */
529 host->irq_status |= flag;
530
531 if (flag & HINFC504_INTS_DMA) {
532 hinfc_write(host, HINFC504_INTCLR_DMA, HINFC504_INTCLR);
533 complete(&host->cmd_complete);
534 } else if (flag & HINFC504_INTS_CE) {
535 hinfc_write(host, HINFC504_INTCLR_CE, HINFC504_INTCLR);
536 } else if (flag & HINFC504_INTS_UE) {
537 hinfc_write(host, HINFC504_INTCLR_UE, HINFC504_INTCLR);
538 }
539
540 return IRQ_HANDLED;
541}
542
543static int hisi_nand_read_page_hwecc(struct mtd_info *mtd,
544 struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
545{
546 struct hinfc_host *host = chip->priv;
547 int max_bitflips = 0, stat = 0, stat_max = 0, status_ecc;
548 int stat_1, stat_2;
549
550 chip->read_buf(mtd, buf, mtd->writesize);
551 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
552
553 /* errors which can not be corrected by ECC */
554 if (host->irq_status & HINFC504_INTS_UE) {
555 mtd->ecc_stats.failed++;
556 } else if (host->irq_status & HINFC504_INTS_CE) {
557 /* TODO: need to add other ECC modes! */
558 switch (chip->ecc.strength) {
559 case 16:
560 status_ecc = hinfc_read(host, HINFC504_ECC_STATUS) >>
561 HINFC504_ECC_16_BIT_SHIFT & 0x0fff;
562 stat_2 = status_ecc & 0x3f;
563 stat_1 = status_ecc >> 6 & 0x3f;
564 stat = stat_1 + stat_2;
565 stat_max = max_t(int, stat_1, stat_2);
566 }
567 mtd->ecc_stats.corrected += stat;
568 max_bitflips = max_t(int, max_bitflips, stat_max);
569 }
570 host->irq_status = 0;
571
572 return max_bitflips;
573}
574
575static int hisi_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
576 int page)
577{
578 struct hinfc_host *host = chip->priv;
579
580 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
581 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
582
583 if (host->irq_status & HINFC504_INTS_UE) {
584 host->irq_status = 0;
585 return -EBADMSG;
586 }
587
588 host->irq_status = 0;
589 return 0;
590}
591
592static int hisi_nand_write_page_hwecc(struct mtd_info *mtd,
593 struct nand_chip *chip, const uint8_t *buf, int oob_required)
594{
595 chip->write_buf(mtd, buf, mtd->writesize);
596 if (oob_required)
597 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
598
599 return 0;
600}
601
602static void hisi_nfc_host_init(struct hinfc_host *host)
603{
604 struct nand_chip *chip = &host->chip;
605 unsigned int flag = 0;
606
607 host->version = hinfc_read(host, HINFC_VERSION);
608 host->addr_cycle = 0;
609 host->addr_value[0] = 0;
610 host->addr_value[1] = 0;
611 host->cache_addr_value[0] = ~0;
612 host->cache_addr_value[1] = ~0;
613 host->chipselect = 0;
614
615 /* defaults: page size 2K, ecc_none; reconfigured later in probe */
616 flag = HINFC504_CON_OP_MODE_NORMAL | HINFC504_CON_READY_BUSY_SEL
617 | ((0x001 & HINFC504_CON_PAGESIZE_MASK)
618 << HINFC504_CON_PAGESIZE_SHIFT)
619 | ((0x0 & HINFC504_CON_ECCTYPE_MASK)
620 << HINFC504_CON_ECCTYPE_SHIFT)
621 | ((chip->options & NAND_BUSWIDTH_16) ?
622 HINFC504_CON_BUS_WIDTH : 0);
623 hinfc_write(host, flag, HINFC504_CON);
624
625 memset(host->mmio, 0xff, HINFC504_BUFFER_BASE_ADDRESS_LEN);
626
627 hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH,
628 HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH);
629
630 /* enable DMA irq */
631 hinfc_write(host, HINFC504_INTEN_DMA, HINFC504_INTEN);
632}
633
634static struct nand_ecclayout nand_ecc_2K_16bits = {
635 .oobavail = 6,
636 .oobfree = { {2, 6} },
637};
638
639static int hisi_nfc_ecc_probe(struct hinfc_host *host)
640{
641 unsigned int flag;
642 int size, strength, ecc_bits;
643 struct device *dev = host->dev;
644 struct nand_chip *chip = &host->chip;
645 struct mtd_info *mtd = &host->mtd;
646 struct device_node *np = host->dev->of_node;
647
648 size = of_get_nand_ecc_step_size(np);
649 strength = of_get_nand_ecc_strength(np);
650 if (size != 1024) {
651 dev_err(dev, "invalid ecc size: %d\n", size);
652 return -EINVAL;
653 }
654
655 if ((size == 1024) && ((strength != 8) && (strength != 16) &&
656 (strength != 24) && (strength != 40))) {
657 dev_err(dev, "ecc size and strength do not match\n");
658 return -EINVAL;
659 }
660
661 chip->ecc.size = size;
662 chip->ecc.strength = strength;
663
664 chip->ecc.read_page = hisi_nand_read_page_hwecc;
665 chip->ecc.read_oob = hisi_nand_read_oob;
666 chip->ecc.write_page = hisi_nand_write_page_hwecc;
667
668 switch (chip->ecc.strength) {
669 case 16:
670 ecc_bits = 6;
671 if (mtd->writesize == 2048)
672 chip->ecc.layout = &nand_ecc_2K_16bits;
673
674 /* TODO: add more page size support */
675 break;
676
677 /* TODO: add more ecc strength support */
678 default:
679 dev_err(dev, "unsupported ecc strength: %d\n", chip->ecc.strength);
680 return -EINVAL;
681 }
682
683 flag = hinfc_read(host, HINFC504_CON);
684 /* add ecc type configure */
685 flag |= ((ecc_bits & HINFC504_CON_ECCTYPE_MASK)
686 << HINFC504_CON_ECCTYPE_SHIFT);
687 hinfc_write(host, flag, HINFC504_CON);
688
689 /* enable ecc irq */
690 flag = hinfc_read(host, HINFC504_INTEN) & 0xfff;
691 hinfc_write(host, flag | HINFC504_INTEN_UE | HINFC504_INTEN_CE,
692 HINFC504_INTEN);
693
694 return 0;
695}
696
697static int hisi_nfc_probe(struct platform_device *pdev)
698{
699 int ret = 0, irq, buswidth, flag, max_chips = HINFC504_MAX_CHIP;
700 struct device *dev = &pdev->dev;
701 struct hinfc_host *host;
702 struct nand_chip *chip;
703 struct mtd_info *mtd;
704 struct resource *res;
705 struct device_node *np = dev->of_node;
706 struct mtd_part_parser_data ppdata;
707
708 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
709 if (!host)
710 return -ENOMEM;
711 host->dev = dev;
712
713 platform_set_drvdata(pdev, host);
714 chip = &host->chip;
715 mtd = &host->mtd;
716
717 irq = platform_get_irq(pdev, 0);
718 if (irq < 0) {
719 dev_err(dev, "no IRQ resource defined\n");
720 ret = -ENXIO;
721 goto err_res;
722 }
723
724 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
725 host->iobase = devm_ioremap_resource(dev, res);
726 if (IS_ERR(host->iobase)) {
727 ret = PTR_ERR(host->iobase);
728 goto err_res;
729 }
730
731 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
732 host->mmio = devm_ioremap_resource(dev, res);
733 if (IS_ERR(host->mmio)) {
734 ret = PTR_ERR(host->mmio);
735 dev_err(dev, "devm_ioremap_resource[1] fail\n");
736 goto err_res;
737 }
738
739 mtd->priv = chip;
740 mtd->owner = THIS_MODULE;
741 mtd->name = "hisi_nand";
742 mtd->dev.parent = &pdev->dev;
743
744 chip->priv = host;
745 chip->cmdfunc = hisi_nfc_cmdfunc;
746 chip->select_chip = hisi_nfc_select_chip;
747 chip->read_byte = hisi_nfc_read_byte;
748 chip->read_word = hisi_nfc_read_word;
749 chip->write_buf = hisi_nfc_write_buf;
750 chip->read_buf = hisi_nfc_read_buf;
751 chip->chip_delay = HINFC504_CHIP_DELAY;
752
753 chip->ecc.mode = of_get_nand_ecc_mode(np);
754
755 buswidth = of_get_nand_bus_width(np);
756 if (buswidth == 16)
757 chip->options |= NAND_BUSWIDTH_16;
758
759 hisi_nfc_host_init(host);
760
761 ret = devm_request_irq(dev, irq, hinfc_irq_handle, 0,
762 "nandc", host);
763 if (ret) {
764 dev_err(dev, "failed to request IRQ\n");
765 goto err_res;
766 }
767
768 ret = nand_scan_ident(mtd, max_chips, NULL);
769 if (ret) {
770 ret = -ENODEV;
771 goto err_res;
772 }
773
774 host->buffer = dmam_alloc_coherent(dev, mtd->writesize + mtd->oobsize,
775 &host->dma_buffer, GFP_KERNEL);
776 if (!host->buffer) {
777 ret = -ENOMEM;
778 goto err_res;
779 }
780
781 host->dma_oob = host->dma_buffer + mtd->writesize;
782 memset(host->buffer, 0xff, mtd->writesize + mtd->oobsize);
783
784 flag = hinfc_read(host, HINFC504_CON);
785 flag &= ~(HINFC504_CON_PAGESIZE_MASK << HINFC504_CON_PAGESIZE_SHIFT);
786 switch (mtd->writesize) {
787 case 2048:
788 flag |= (0x001 << HINFC504_CON_PAGESIZE_SHIFT); break;
789 /*
790 * TODO: add more pagesize support,
791 * default pagesize has been set in hisi_nfc_host_init
792 */
793 default:
794 dev_err(dev, "unsupported non-2KB NAND page size\n");
795 ret = -EINVAL;
796 goto err_res;
797 }
798 hinfc_write(host, flag, HINFC504_CON);
799
800 if (chip->ecc.mode == NAND_ECC_HW)
801 hisi_nfc_ecc_probe(host);
802
803 ret = nand_scan_tail(mtd);
804 if (ret) {
805 dev_err(dev, "nand_scan_tail failed: %d\n", ret);
806 goto err_res;
807 }
808
809 ppdata.of_node = np;
810 ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
811 if (ret) {
812 dev_err(dev, "Err MTD partition=%d\n", ret);
813 goto err_mtd;
814 }
815
816 return 0;
817
818err_mtd:
819 nand_release(mtd);
820err_res:
821 return ret;
822}
823
824static int hisi_nfc_remove(struct platform_device *pdev)
825{
826 struct hinfc_host *host = platform_get_drvdata(pdev);
827 struct mtd_info *mtd = &host->mtd;
828
829 nand_release(mtd);
830
831 return 0;
832}
833
834#ifdef CONFIG_PM_SLEEP
835static int hisi_nfc_suspend(struct device *dev)
836{
837 struct hinfc_host *host = dev_get_drvdata(dev);
838 unsigned long timeout = jiffies + HINFC504_NFC_PM_TIMEOUT;
839
840 while (time_before(jiffies, timeout)) {
841 if (((hinfc_read(host, HINFC504_STATUS) & 0x1) == 0x0) &&
842 (hinfc_read(host, HINFC504_DMA_CTRL) &
843 HINFC504_DMA_CTRL_DMA_START)) {
844 cond_resched();
845 return 0;
846 }
847 }
848
849 dev_err(host->dev, "nand controller suspend timeout.\n");
850
851 return -EAGAIN;
852}
853
854static int hisi_nfc_resume(struct device *dev)
855{
856 int cs;
857 struct hinfc_host *host = dev_get_drvdata(dev);
858 struct nand_chip *chip = &host->chip;
859
860 for (cs = 0; cs < chip->numchips; cs++)
861 hisi_nfc_send_cmd_reset(host, cs);
862 hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH,
863 HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH);
864
865 return 0;
866}
867#endif
868static SIMPLE_DEV_PM_OPS(hisi_nfc_pm_ops, hisi_nfc_suspend, hisi_nfc_resume);
869
870static const struct of_device_id nfc_id_table[] = {
871 { .compatible = "hisilicon,504-nfc" },
872 {}
873};
874MODULE_DEVICE_TABLE(of, nfc_id_table);
875
876static struct platform_driver hisi_nfc_driver = {
877 .driver = {
878 .name = "hisi_nand",
879 .of_match_table = nfc_id_table,
880 .pm = &hisi_nfc_pm_ops,
881 },
882 .probe = hisi_nfc_probe,
883 .remove = hisi_nfc_remove,
884};
885
886module_platform_driver(hisi_nfc_driver);
887
888MODULE_LICENSE("GPL");
889MODULE_AUTHOR("Zhou Wang");
890MODULE_AUTHOR("Zhiyong Cai");
891MODULE_DESCRIPTION("Hisilicon Nand Flash Controller Driver");
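
Aside on the 16-bit ECC decode in hisi_nand_read_page_hwecc() above: bits 23:12 of HINFC504_ECC_STATUS are treated as two 6-bit corrected-bit counts (register layout inferred from the code, not from a datasheet). A standalone sketch of the arithmetic:

#include <stdint.h>

/* Returns max_bitflips; *total gets the value added to
 * ecc_stats.corrected, mirroring the strength-16 case above. */
static int example_decode_ecc16(uint32_t ecc_status_reg, int *total)
{
        uint32_t field = (ecc_status_reg >> 12) & 0x0fff;
        int stat_2 = field & 0x3f;        /* low 6-bit count */
        int stat_1 = (field >> 6) & 0x3f; /* next 6-bit count */

        *total = stat_1 + stat_2;
        return stat_1 > stat_2 ? stat_1 : stat_2;
}
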
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index 1633ec9c5108..ebf2cce04cba 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -69,7 +69,7 @@ struct jz_nand {
69 69
70 int selected_bank; 70 int selected_bank;
71 71
72 struct jz_nand_platform_data *pdata; 72 struct gpio_desc *busy_gpio;
73 bool is_reading; 73 bool is_reading;
74}; 74};
75 75
@@ -131,7 +131,7 @@ static void jz_nand_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
131static int jz_nand_dev_ready(struct mtd_info *mtd) 131static int jz_nand_dev_ready(struct mtd_info *mtd)
132{ 132{
133 struct jz_nand *nand = mtd_to_jz_nand(mtd); 133 struct jz_nand *nand = mtd_to_jz_nand(mtd);
134 return gpio_get_value_cansleep(nand->pdata->busy_gpio); 134 return gpiod_get_value_cansleep(nand->busy_gpio);
135} 135}
136 136
137static void jz_nand_hwctl(struct mtd_info *mtd, int mode) 137static void jz_nand_hwctl(struct mtd_info *mtd, int mode)
@@ -423,14 +423,12 @@ static int jz_nand_probe(struct platform_device *pdev)
423 if (ret) 423 if (ret)
424 goto err_free; 424 goto err_free;
425 425
426 if (pdata && gpio_is_valid(pdata->busy_gpio)) { 426 nand->busy_gpio = devm_gpiod_get_optional(&pdev->dev, "busy", GPIOD_IN);
427 ret = gpio_request(pdata->busy_gpio, "NAND busy pin"); 427 if (IS_ERR(nand->busy_gpio)) {
428 if (ret) { 428 ret = PTR_ERR(nand->busy_gpio);
429 dev_err(&pdev->dev, 429 dev_err(&pdev->dev, "Failed to request busy gpio %d\n",
430 "Failed to request busy gpio %d: %d\n", 430 ret);
431 pdata->busy_gpio, ret); 431 goto err_iounmap_mmio;
432 goto err_iounmap_mmio;
433 }
434 } 432 }
435 433
436 mtd = &nand->mtd; 434 mtd = &nand->mtd;
@@ -454,10 +452,9 @@ static int jz_nand_probe(struct platform_device *pdev)
454 chip->cmd_ctrl = jz_nand_cmd_ctrl; 452 chip->cmd_ctrl = jz_nand_cmd_ctrl;
455 chip->select_chip = jz_nand_select_chip; 453 chip->select_chip = jz_nand_select_chip;
456 454
457 if (pdata && gpio_is_valid(pdata->busy_gpio)) 455 if (nand->busy_gpio)
458 chip->dev_ready = jz_nand_dev_ready; 456 chip->dev_ready = jz_nand_dev_ready;
459 457
460 nand->pdata = pdata;
461 platform_set_drvdata(pdev, nand); 458 platform_set_drvdata(pdev, nand);
462 459
463 /* We are going to autodetect NAND chips in the banks specified in the 460 /* We are going to autodetect NAND chips in the banks specified in the
@@ -496,7 +493,7 @@ static int jz_nand_probe(struct platform_device *pdev)
496 } 493 }
497 if (chipnr == 0) { 494 if (chipnr == 0) {
498 dev_err(&pdev->dev, "No NAND chips found\n"); 495 dev_err(&pdev->dev, "No NAND chips found\n");
499 goto err_gpio_busy; 496 goto err_iounmap_mmio;
500 } 497 }
501 498
502 if (pdata && pdata->ident_callback) { 499 if (pdata && pdata->ident_callback) {
@@ -533,9 +530,6 @@ err_unclaim_banks:
533 nand->bank_base[bank - 1]); 530 nand->bank_base[bank - 1]);
534 } 531 }
535 writel(0, nand->base + JZ_REG_NAND_CTRL); 532 writel(0, nand->base + JZ_REG_NAND_CTRL);
536err_gpio_busy:
537 if (pdata && gpio_is_valid(pdata->busy_gpio))
538 gpio_free(pdata->busy_gpio);
539err_iounmap_mmio: 533err_iounmap_mmio:
540 jz_nand_iounmap_resource(nand->mem, nand->base); 534 jz_nand_iounmap_resource(nand->mem, nand->base);
541err_free: 535err_free:
@@ -546,7 +540,6 @@ err_free:
546static int jz_nand_remove(struct platform_device *pdev) 540static int jz_nand_remove(struct platform_device *pdev)
547{ 541{
548 struct jz_nand *nand = platform_get_drvdata(pdev); 542 struct jz_nand *nand = platform_get_drvdata(pdev);
549 struct jz_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
550 size_t i; 543 size_t i;
551 544
552 nand_release(&nand->mtd); 545 nand_release(&nand->mtd);
@@ -562,8 +555,6 @@ static int jz_nand_remove(struct platform_device *pdev)
562 gpio_free(JZ_GPIO_MEM_CS0 + bank - 1); 555 gpio_free(JZ_GPIO_MEM_CS0 + bank - 1);
563 } 556 }
564 } 557 }
565 if (pdata && gpio_is_valid(pdata->busy_gpio))
566 gpio_free(pdata->busy_gpio);
567 558
568 jz_nand_iounmap_resource(nand->mem, nand->base); 559 jz_nand_iounmap_resource(nand->mem, nand->base);
569 560
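
Aside on the gpiod conversion above: devm_gpiod_get_optional() returns NULL (not an error pointer) when no "busy" GPIO is described, which is why the plain if (nand->busy_gpio) test can replace the old gpio_is_valid() check. A minimal sketch of the pattern (the example_* name is hypothetical):

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_get_busy_gpio(struct device *dev,
                                 struct gpio_desc **out)
{
        struct gpio_desc *gpiod;

        gpiod = devm_gpiod_get_optional(dev, "busy", GPIOD_IN);
        if (IS_ERR(gpiod))
                return PTR_ERR(gpiod); /* real lookup failure */

        *out = gpiod; /* may be NULL: GPIO simply not wired */
        return 0;
}
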
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 41585dfb206f..df7eb4ff07d1 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -157,7 +157,6 @@ static uint8_t nand_read_byte(struct mtd_info *mtd)
157 157
158/** 158/**
159 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip 159 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
160 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
161 * @mtd: MTD device structure 160 * @mtd: MTD device structure
162 * 161 *
163 * Default read function for 16bit buswidth with endianness conversion. 162 * Default read function for 16bit buswidth with endianness conversion.
@@ -1751,11 +1750,10 @@ static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1751static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1750static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1752 int page) 1751 int page)
1753{ 1752{
1754 uint8_t *buf = chip->oob_poi;
1755 int length = mtd->oobsize; 1753 int length = mtd->oobsize;
1756 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad; 1754 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
1757 int eccsize = chip->ecc.size; 1755 int eccsize = chip->ecc.size;
1758 uint8_t *bufpoi = buf; 1756 uint8_t *bufpoi = chip->oob_poi;
1759 int i, toread, sndrnd = 0, pos; 1757 int i, toread, sndrnd = 0, pos;
1760 1758
1761 chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page); 1759 chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
@@ -2944,6 +2942,16 @@ static void nand_resume(struct mtd_info *mtd)
2944 __func__); 2942 __func__);
2945} 2943}
2946 2944
2945/**
2946 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
2947 * prevent further operations
2948 * @mtd: MTD device structure
2949 */
2950static void nand_shutdown(struct mtd_info *mtd)
2951{
2952 nand_get_device(mtd, FL_SHUTDOWN);
2953}
2954
2947/* Set default functions */ 2955/* Set default functions */
2948static void nand_set_defaults(struct nand_chip *chip, int busw) 2956static void nand_set_defaults(struct nand_chip *chip, int busw)
2949{ 2957{
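
[Note: nand_shutdown() works by acquiring the chip with the new FL_SHUTDOWN state and never releasing it, so anything racing with reboot blocks in nand_get_device() instead of touching a half-reset controller. The consumer side lives in the MTD core, not in this hunk; a hedged sketch of how ->_reboot is expected to be driven, with names assumed from the contemporaneous mtdcore reboot-notifier work:

        /* Sketch: the MTD core registers a reboot notifier per device and
         * calls the driver hook from it; for NAND that hook is nand_shutdown. */
        static int mtd_reboot_notifier(struct notifier_block *n,
                                       unsigned long state, void *cmd)
        {
                struct mtd_info *mtd = container_of(n, struct mtd_info,
                                                    reboot_notifier); /* assumed member */

                mtd->_reboot(mtd);      /* -> nand_get_device(mtd, FL_SHUTDOWN) */
                return NOTIFY_DONE;
        }
]
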
@@ -4028,22 +4036,24 @@ int nand_scan_tail(struct mtd_info *mtd)
4028 ecc->read_oob = nand_read_oob_std; 4036 ecc->read_oob = nand_read_oob_std;
4029 ecc->write_oob = nand_write_oob_std; 4037 ecc->write_oob = nand_write_oob_std;
4030 /* 4038 /*
4031 * Board driver should supply ecc.size and ecc.bytes values to 4039 * Board driver should supply ecc.size and ecc.strength values
4032 * select how many bits are correctable; see nand_bch_init() 4040 * to select how many bits are correctable. Otherwise, default
4033 * for details. Otherwise, default to 4 bits for large page 4041 * to 4 bits for large page devices.
4034 * devices.
4035 */ 4042 */
4036 if (!ecc->size && (mtd->oobsize >= 64)) { 4043 if (!ecc->size && (mtd->oobsize >= 64)) {
4037 ecc->size = 512; 4044 ecc->size = 512;
4038 ecc->bytes = DIV_ROUND_UP(13 * ecc->strength, 8); 4045 ecc->strength = 4;
4039 } 4046 }
4047
4048 /* See nand_bch_init() for details. */
4049 ecc->bytes = DIV_ROUND_UP(
4050 ecc->strength * fls(8 * ecc->size), 8);
4040 ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes, 4051 ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes,
4041 &ecc->layout); 4052 &ecc->layout);
4042 if (!ecc->priv) { 4053 if (!ecc->priv) {
4043 pr_warn("BCH ECC initialization failed!\n"); 4054 pr_warn("BCH ECC initialization failed!\n");
4044 BUG(); 4055 BUG();
4045 } 4056 }
4046 ecc->strength = ecc->bytes * 8 / fls(8 * ecc->size);
4047 break; 4057 break;
4048 4058
4049 case NAND_ECC_NONE: 4059 case NAND_ECC_NONE:
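
[Note: with the defaults, the reordered math yields the same 7-byte code as before, but is now driven by strength instead of deriving strength after the fact. Worked through for the large-page default:

        /* ecc->size = 512, ecc->strength = 4 (new default):
         *   fls(8 * 512) = fls(4096) = 13             bits per BCH symbol
         *   ecc->bytes   = DIV_ROUND_UP(4 * 13, 8)
         *                = DIV_ROUND_UP(52, 8) = 7    ECC bytes per 512-byte step
         * The removed line ran the same product in reverse, recovering
         * strength = bytes * 8 / fls(8 * size) = 7 * 8 / 13 = 4 (truncated). */
]
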
@@ -4146,6 +4156,7 @@ int nand_scan_tail(struct mtd_info *mtd)
4146 mtd->_unlock = NULL; 4156 mtd->_unlock = NULL;
4147 mtd->_suspend = nand_suspend; 4157 mtd->_suspend = nand_suspend;
4148 mtd->_resume = nand_resume; 4158 mtd->_resume = nand_resume;
4159 mtd->_reboot = nand_shutdown;
4149 mtd->_block_isreserved = nand_block_isreserved; 4160 mtd->_block_isreserved = nand_block_isreserved;
4150 mtd->_block_isbad = nand_block_isbad; 4161 mtd->_block_isbad = nand_block_isbad;
4151 mtd->_block_markbad = nand_block_markbad; 4162 mtd->_block_markbad = nand_block_markbad;
@@ -4161,7 +4172,7 @@ int nand_scan_tail(struct mtd_info *mtd)
4161 * properly set. 4172 * properly set.
4162 */ 4173 */
4163 if (!mtd->bitflip_threshold) 4174 if (!mtd->bitflip_threshold)
4164 mtd->bitflip_threshold = mtd->ecc_strength; 4175 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
4165 4176
4166 /* Check, if we should skip the bad block table scan */ 4177 /* Check, if we should skip the bad block table scan */
4167 if (chip->options & NAND_SKIP_BBTSCAN) 4178 if (chip->options & NAND_SKIP_BBTSCAN)
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index ab5bbf567439..f2324271b94e 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -245,7 +245,6 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
245#define STATE_DATAOUT 0x00001000 /* waiting for page data output */ 245#define STATE_DATAOUT 0x00001000 /* waiting for page data output */
246#define STATE_DATAOUT_ID 0x00002000 /* waiting for ID bytes output */ 246#define STATE_DATAOUT_ID 0x00002000 /* waiting for ID bytes output */
247#define STATE_DATAOUT_STATUS 0x00003000 /* waiting for status output */ 247#define STATE_DATAOUT_STATUS 0x00003000 /* waiting for status output */
248#define STATE_DATAOUT_STATUS_M 0x00004000 /* waiting for multi-plane status output */
249#define STATE_DATAOUT_MASK 0x00007000 /* data output states mask */ 248#define STATE_DATAOUT_MASK 0x00007000 /* data output states mask */
250 249
251/* Previous operation is done, ready to accept new requests */ 250/* Previous operation is done, ready to accept new requests */
@@ -269,7 +268,6 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
269#define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */ 268#define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */
270#define OPT_PAGE512 0x00000002 /* 512-byte page chips */ 269#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
271#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */ 270#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
272#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
273#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */ 271#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
274#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */ 272#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
275#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */ 273#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
@@ -1096,8 +1094,6 @@ static char *get_state_name(uint32_t state)
1096 return "STATE_DATAOUT_ID"; 1094 return "STATE_DATAOUT_ID";
1097 case STATE_DATAOUT_STATUS: 1095 case STATE_DATAOUT_STATUS:
1098 return "STATE_DATAOUT_STATUS"; 1096 return "STATE_DATAOUT_STATUS";
1099 case STATE_DATAOUT_STATUS_M:
1100 return "STATE_DATAOUT_STATUS_M";
1101 case STATE_READY: 1097 case STATE_READY:
1102 return "STATE_READY"; 1098 return "STATE_READY";
1103 case STATE_UNKNOWN: 1099 case STATE_UNKNOWN:
@@ -1865,7 +1861,6 @@ static void switch_state(struct nandsim *ns)
1865 break; 1861 break;
1866 1862
1867 case STATE_DATAOUT_STATUS: 1863 case STATE_DATAOUT_STATUS:
1868 case STATE_DATAOUT_STATUS_M:
1869 ns->regs.count = ns->regs.num = 0; 1864 ns->regs.count = ns->regs.num = 0;
1870 break; 1865 break;
1871 1866
@@ -2005,7 +2000,6 @@ static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
2005 } 2000 }
2006 2001
2007 if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS 2002 if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
2008 || NS_STATE(ns->state) == STATE_DATAOUT_STATUS_M
2009 || NS_STATE(ns->state) == STATE_DATAOUT) { 2003 || NS_STATE(ns->state) == STATE_DATAOUT) {
2010 int row = ns->regs.row; 2004 int row = ns->regs.row;
2011 2005
@@ -2343,6 +2337,7 @@ static int __init ns_init_module(void)
2343 } 2337 }
2344 chip->ecc.mode = NAND_ECC_SOFT_BCH; 2338 chip->ecc.mode = NAND_ECC_SOFT_BCH;
2345 chip->ecc.size = 512; 2339 chip->ecc.size = 512;
2340 chip->ecc.strength = bch;
2346 chip->ecc.bytes = eccbytes; 2341 chip->ecc.bytes = eccbytes;
2347 NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size); 2342 NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size);
2348 } 2343 }
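
[Note: after the nand_base hunk above, chip->ecc.strength must be set for soft BCH (unless the driver relies on the large-page default), since nand_scan_tail() now derives ecc.bytes from it. A minimal sketch of what a board driver provides before nand_scan_tail():

        chip->ecc.mode     = NAND_ECC_SOFT_BCH;
        chip->ecc.size     = 512;   /* ECC step, in bytes */
        chip->ecc.strength = 4;     /* correctable bits per step */
        /* ecc.bytes no longer needs to be filled in by hand; nandsim still
         * assigns it for its module-parameter checking, but nand_scan_tail()
         * recomputes it from size and strength. */
]
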
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 63f858e6bf39..60fa89939c24 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1048,10 +1048,9 @@ static int omap_dev_ready(struct mtd_info *mtd)
1048 * @mtd: MTD device structure 1048 * @mtd: MTD device structure
1049 * @mode: Read/Write mode 1049 * @mode: Read/Write mode
1050 * 1050 *
1051 * When using BCH, sector size is hardcoded to 512 bytes. 1051 * When using BCH with SW correction (i.e. no ELM), sector size is set
1052 * Using wrapping mode 6 both for reading and writing if ELM module not uses 1052 * to 512 bytes and we use BCH_WRAPMODE_6 wrapping mode
1053 * for error correction. 1053 * for both reading and writing with:
1054 * On writing,
1055 * eccsize0 = 0 (no additional protected byte in spare area) 1054 * eccsize0 = 0 (no additional protected byte in spare area)
1056 * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area) 1055 * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
1057 */ 1056 */
@@ -1071,15 +1070,9 @@ static void __maybe_unused omap_enable_hwecc_bch(struct mtd_info *mtd, int mode)
1071 case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: 1070 case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1072 bch_type = 0; 1071 bch_type = 0;
1073 nsectors = 1; 1072 nsectors = 1;
1074 if (mode == NAND_ECC_READ) { 1073 wr_mode = BCH_WRAPMODE_6;
1075 wr_mode = BCH_WRAPMODE_6; 1074 ecc_size0 = BCH_ECC_SIZE0;
1076 ecc_size0 = BCH_ECC_SIZE0; 1075 ecc_size1 = BCH_ECC_SIZE1;
1077 ecc_size1 = BCH_ECC_SIZE1;
1078 } else {
1079 wr_mode = BCH_WRAPMODE_6;
1080 ecc_size0 = BCH_ECC_SIZE0;
1081 ecc_size1 = BCH_ECC_SIZE1;
1082 }
1083 break; 1076 break;
1084 case OMAP_ECC_BCH4_CODE_HW: 1077 case OMAP_ECC_BCH4_CODE_HW:
1085 bch_type = 0; 1078 bch_type = 0;
@@ -1097,15 +1090,9 @@ static void __maybe_unused omap_enable_hwecc_bch(struct mtd_info *mtd, int mode)
1097 case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: 1090 case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1098 bch_type = 1; 1091 bch_type = 1;
1099 nsectors = 1; 1092 nsectors = 1;
1100 if (mode == NAND_ECC_READ) { 1093 wr_mode = BCH_WRAPMODE_6;
1101 wr_mode = BCH_WRAPMODE_6; 1094 ecc_size0 = BCH_ECC_SIZE0;
1102 ecc_size0 = BCH_ECC_SIZE0; 1095 ecc_size1 = BCH_ECC_SIZE1;
1103 ecc_size1 = BCH_ECC_SIZE1;
1104 } else {
1105 wr_mode = BCH_WRAPMODE_6;
1106 ecc_size0 = BCH_ECC_SIZE0;
1107 ecc_size1 = BCH_ECC_SIZE1;
1108 }
1109 break; 1096 break;
1110 case OMAP_ECC_BCH8_CODE_HW: 1097 case OMAP_ECC_BCH8_CODE_HW:
1111 bch_type = 1; 1098 bch_type = 1;
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index ccaa8e283388..6f93b2990d25 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -1110,8 +1110,6 @@ static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
1110 1110
1111 switch (ecc->mode) { 1111 switch (ecc->mode) {
1112 case NAND_ECC_SOFT_BCH: 1112 case NAND_ECC_SOFT_BCH:
1113 ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * ecc->size),
1114 8);
1115 break; 1113 break;
1116 case NAND_ECC_HW: 1114 case NAND_ECC_HW:
1117 ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np); 1115 ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np);
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index 51b9d6af307f..a5dfbfbebfca 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -89,9 +89,10 @@ static int find_boot_record(struct NFTLrecord *nftl)
89 } 89 }
90 90
91 /* To be safer with BIOS, also use erase mark as discriminant */ 91 /* To be safer with BIOS, also use erase mark as discriminant */
92 if ((ret = nftl_read_oob(mtd, block * nftl->EraseSize + 92 ret = nftl_read_oob(mtd, block * nftl->EraseSize +
93 SECTORSIZE + 8, 8, &retlen, 93 SECTORSIZE + 8, 8, &retlen,
94 (char *)&h1) < 0)) { 94 (char *)&h1);
95 if (ret < 0) {
95 printk(KERN_WARNING "ANAND header found at 0x%x in mtd%d, but OOB data read failed (err %d)\n", 96 printk(KERN_WARNING "ANAND header found at 0x%x in mtd%d, but OOB data read failed (err %d)\n",
96 block * nftl->EraseSize, nftl->mbd.mtd->index, ret); 97 block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
97 continue; 98 continue;
@@ -109,8 +110,9 @@ static int find_boot_record(struct NFTLrecord *nftl)
109 } 110 }
110 111
111 /* Finally reread to check ECC */ 112 /* Finally reread to check ECC */
112 if ((ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE, 113 ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE,
113 &retlen, buf) < 0)) { 114 &retlen, buf);
115 if (ret < 0) {
114 printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but ECC read failed (err %d)\n", 116 printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but ECC read failed (err %d)\n",
115 block * nftl->EraseSize, nftl->mbd.mtd->index, ret); 117 block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
116 continue; 118 continue;
@@ -228,9 +230,11 @@ device is already correct.
228The new DiskOnChip driver already scanned the bad block table. Just query it. 230The new DiskOnChip driver already scanned the bad block table. Just query it.
229 if ((i & (SECTORSIZE - 1)) == 0) { 231 if ((i & (SECTORSIZE - 1)) == 0) {
230 /* read one sector for every SECTORSIZE of blocks */ 232 /* read one sector for every SECTORSIZE of blocks */
231 if ((ret = mtd->read(nftl->mbd.mtd, block * nftl->EraseSize + 233 ret = mtd->read(nftl->mbd.mtd,
232 i + SECTORSIZE, SECTORSIZE, &retlen, 234 block * nftl->EraseSize + i +
233 buf)) < 0) { 235 SECTORSIZE, SECTORSIZE,
236 &retlen, buf);
237 if (ret < 0) {
234 printk(KERN_NOTICE "Read of bad sector table failed (err %d)\n", 238 printk(KERN_NOTICE "Read of bad sector table failed (err %d)\n",
235 ret); 239 ret);
236 kfree(nftl->ReplUnitTable); 240 kfree(nftl->ReplUnitTable);
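
[Note: the first two reworked call sites fix the same precedence bug — `<` binds tighter than `=`, so the old `if ((ret = f() < 0))` stored the comparison result in ret, and the `err %d` in the log could only ever print 1; the third call site was already correct and is merely restyled to match. A self-contained illustration with a stand-in failing call:

        #include <stdio.h>

        static int fake_read(void)
        {
                return -5;                              /* stand-in failure code */
        }

        int main(void)
        {
                int ret;

                if ((ret = fake_read() < 0))            /* parsed: ret = (fake_read() < 0) */
                        printf("buggy: err %d\n", ret); /* prints 1, not -5 */

                ret = fake_read();                      /* assign first ... */
                if (ret < 0)                            /* ... then test */
                        printf("fixed: err %d\n", ret); /* prints -5 */
                return 0;
        }
]
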
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index 39763b94f67d..1c7308c2c77d 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -57,7 +57,9 @@
57 57
58#define QUADSPI_BUF3CR 0x1c 58#define QUADSPI_BUF3CR 0x1c
59#define QUADSPI_BUF3CR_ALLMST_SHIFT 31 59#define QUADSPI_BUF3CR_ALLMST_SHIFT 31
60#define QUADSPI_BUF3CR_ALLMST (1 << QUADSPI_BUF3CR_ALLMST_SHIFT) 60#define QUADSPI_BUF3CR_ALLMST_MASK (1 << QUADSPI_BUF3CR_ALLMST_SHIFT)
61#define QUADSPI_BUF3CR_ADATSZ_SHIFT 8
62#define QUADSPI_BUF3CR_ADATSZ_MASK (0xFF << QUADSPI_BUF3CR_ADATSZ_SHIFT)
61 63
62#define QUADSPI_BFGENCR 0x20 64#define QUADSPI_BFGENCR 0x20
63#define QUADSPI_BFGENCR_PAR_EN_SHIFT 16 65#define QUADSPI_BFGENCR_PAR_EN_SHIFT 16
@@ -198,18 +200,21 @@ struct fsl_qspi_devtype_data {
198 enum fsl_qspi_devtype devtype; 200 enum fsl_qspi_devtype devtype;
199 int rxfifo; 201 int rxfifo;
200 int txfifo; 202 int txfifo;
203 int ahb_buf_size;
201}; 204};
202 205
203static struct fsl_qspi_devtype_data vybrid_data = { 206static struct fsl_qspi_devtype_data vybrid_data = {
204 .devtype = FSL_QUADSPI_VYBRID, 207 .devtype = FSL_QUADSPI_VYBRID,
205 .rxfifo = 128, 208 .rxfifo = 128,
206 .txfifo = 64 209 .txfifo = 64,
210 .ahb_buf_size = 1024
207}; 211};
208 212
209static struct fsl_qspi_devtype_data imx6sx_data = { 213static struct fsl_qspi_devtype_data imx6sx_data = {
210 .devtype = FSL_QUADSPI_IMX6SX, 214 .devtype = FSL_QUADSPI_IMX6SX,
211 .rxfifo = 128, 215 .rxfifo = 128,
212 .txfifo = 512 216 .txfifo = 512,
217 .ahb_buf_size = 1024
213}; 218};
214 219
215#define FSL_QSPI_MAX_CHIP 4 220#define FSL_QSPI_MAX_CHIP 4
@@ -227,6 +232,7 @@ struct fsl_qspi {
227 u32 nor_num; 232 u32 nor_num;
228 u32 clk_rate; 233 u32 clk_rate;
229 unsigned int chip_base_addr; /* We may support two chips. */ 234 unsigned int chip_base_addr; /* We may support two chips. */
235 bool has_second_chip;
230}; 236};
231 237
232static inline int is_vybrid_qspi(struct fsl_qspi *q) 238static inline int is_vybrid_qspi(struct fsl_qspi *q)
@@ -583,7 +589,12 @@ static void fsl_qspi_init_abh_read(struct fsl_qspi *q)
583 writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR); 589 writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR);
584 writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR); 590 writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR);
585 writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR); 591 writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR);
586 writel(QUADSPI_BUF3CR_ALLMST, base + QUADSPI_BUF3CR); 592 /*
593 * Set ADATSZ with the maximum AHB buffer size to improve the
594 * read performance.
595 */
596 writel(QUADSPI_BUF3CR_ALLMST_MASK | ((q->devtype_data->ahb_buf_size / 8)
597 << QUADSPI_BUF3CR_ADATSZ_SHIFT), base + QUADSPI_BUF3CR);
587 598
588 /* We only use the buffer3 */ 599 /* We only use the buffer3 */
589 writel(0, base + QUADSPI_BUF0IND); 600 writel(0, base + QUADSPI_BUF0IND);
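
[Note: the ADATSZ field is programmed in units of 8 bytes, hence the divide; with the 1024-byte AHB buffer both devtype entries declare, the BUF3CR write evaluates to:

        /* ahb_buf_size = 1024:
         *   ADATSZ field   = 1024 / 8 = 128 = 0x80
         *   register value = QUADSPI_BUF3CR_ALLMST_MASK | (0x80 << 8)
         *                  = 0x80000000 | 0x00008000 = 0x80008000 */
]
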
@@ -783,7 +794,6 @@ static int fsl_qspi_probe(struct platform_device *pdev)
783 struct spi_nor *nor; 794 struct spi_nor *nor;
784 struct mtd_info *mtd; 795 struct mtd_info *mtd;
785 int ret, i = 0; 796 int ret, i = 0;
786 bool has_second_chip = false;
787 const struct of_device_id *of_id = 797 const struct of_device_id *of_id =
788 of_match_device(fsl_qspi_dt_ids, &pdev->dev); 798 of_match_device(fsl_qspi_dt_ids, &pdev->dev);
789 799
@@ -798,37 +808,30 @@ static int fsl_qspi_probe(struct platform_device *pdev)
798 /* find the resources */ 808 /* find the resources */
799 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI"); 809 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
800 q->iobase = devm_ioremap_resource(dev, res); 810 q->iobase = devm_ioremap_resource(dev, res);
801 if (IS_ERR(q->iobase)) { 811 if (IS_ERR(q->iobase))
802 ret = PTR_ERR(q->iobase); 812 return PTR_ERR(q->iobase);
803 goto map_failed;
804 }
805 813
806 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 814 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
807 "QuadSPI-memory"); 815 "QuadSPI-memory");
808 q->ahb_base = devm_ioremap_resource(dev, res); 816 q->ahb_base = devm_ioremap_resource(dev, res);
809 if (IS_ERR(q->ahb_base)) { 817 if (IS_ERR(q->ahb_base))
810 ret = PTR_ERR(q->ahb_base); 818 return PTR_ERR(q->ahb_base);
811 goto map_failed; 819
812 }
813 q->memmap_phy = res->start; 820 q->memmap_phy = res->start;
814 821
815 /* find the clocks */ 822 /* find the clocks */
816 q->clk_en = devm_clk_get(dev, "qspi_en"); 823 q->clk_en = devm_clk_get(dev, "qspi_en");
817 if (IS_ERR(q->clk_en)) { 824 if (IS_ERR(q->clk_en))
818 ret = PTR_ERR(q->clk_en); 825 return PTR_ERR(q->clk_en);
819 goto map_failed;
820 }
821 826
822 q->clk = devm_clk_get(dev, "qspi"); 827 q->clk = devm_clk_get(dev, "qspi");
823 if (IS_ERR(q->clk)) { 828 if (IS_ERR(q->clk))
824 ret = PTR_ERR(q->clk); 829 return PTR_ERR(q->clk);
825 goto map_failed;
826 }
827 830
828 ret = clk_prepare_enable(q->clk_en); 831 ret = clk_prepare_enable(q->clk_en);
829 if (ret) { 832 if (ret) {
830 dev_err(dev, "can not enable the qspi_en clock\n"); 833 dev_err(dev, "can not enable the qspi_en clock\n");
831 goto map_failed; 834 return ret;
832 } 835 }
833 836
834 ret = clk_prepare_enable(q->clk); 837 ret = clk_prepare_enable(q->clk);
@@ -860,14 +863,14 @@ static int fsl_qspi_probe(struct platform_device *pdev)
860 goto irq_failed; 863 goto irq_failed;
861 864
862 if (of_get_property(np, "fsl,qspi-has-second-chip", NULL)) 865 if (of_get_property(np, "fsl,qspi-has-second-chip", NULL))
863 has_second_chip = true; 866 q->has_second_chip = true;
864 867
865 /* iterate the subnodes. */ 868 /* iterate the subnodes. */
866 for_each_available_child_of_node(dev->of_node, np) { 869 for_each_available_child_of_node(dev->of_node, np) {
867 char modalias[40]; 870 char modalias[40];
868 871
869 /* skip the holes */ 872 /* skip the holes */
870 if (!has_second_chip) 873 if (!q->has_second_chip)
871 i *= 2; 874 i *= 2;
872 875
873 nor = &q->nor[i]; 876 nor = &q->nor[i];
@@ -890,24 +893,24 @@ static int fsl_qspi_probe(struct platform_device *pdev)
890 893
891 ret = of_modalias_node(np, modalias, sizeof(modalias)); 894 ret = of_modalias_node(np, modalias, sizeof(modalias));
892 if (ret < 0) 895 if (ret < 0)
893 goto map_failed; 896 goto irq_failed;
894 897
895 ret = of_property_read_u32(np, "spi-max-frequency", 898 ret = of_property_read_u32(np, "spi-max-frequency",
896 &q->clk_rate); 899 &q->clk_rate);
897 if (ret < 0) 900 if (ret < 0)
898 goto map_failed; 901 goto irq_failed;
899 902
900 /* set the chip address for READID */ 903 /* set the chip address for READID */
901 fsl_qspi_set_base_addr(q, nor); 904 fsl_qspi_set_base_addr(q, nor);
902 905
903 ret = spi_nor_scan(nor, modalias, SPI_NOR_QUAD); 906 ret = spi_nor_scan(nor, modalias, SPI_NOR_QUAD);
904 if (ret) 907 if (ret)
905 goto map_failed; 908 goto irq_failed;
906 909
907 ppdata.of_node = np; 910 ppdata.of_node = np;
908 ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0); 911 ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
909 if (ret) 912 if (ret)
910 goto map_failed; 913 goto irq_failed;
911 914
912 /* Set the correct NOR size now. */ 915 /* Set the correct NOR size now. */
913 if (q->nor_size == 0) { 916 if (q->nor_size == 0) {
@@ -939,19 +942,19 @@ static int fsl_qspi_probe(struct platform_device *pdev)
939 942
940 clk_disable(q->clk); 943 clk_disable(q->clk);
941 clk_disable(q->clk_en); 944 clk_disable(q->clk_en);
942 dev_info(dev, "QuadSPI SPI NOR flash driver\n");
943 return 0; 945 return 0;
944 946
945last_init_failed: 947last_init_failed:
946 for (i = 0; i < q->nor_num; i++) 948 for (i = 0; i < q->nor_num; i++) {
949 /* skip the holes */
950 if (!q->has_second_chip)
951 i *= 2;
947 mtd_device_unregister(&q->mtd[i]); 952 mtd_device_unregister(&q->mtd[i]);
948 953 }
949irq_failed: 954irq_failed:
950 clk_disable_unprepare(q->clk); 955 clk_disable_unprepare(q->clk);
951clk_failed: 956clk_failed:
952 clk_disable_unprepare(q->clk_en); 957 clk_disable_unprepare(q->clk_en);
953map_failed:
954 dev_err(dev, "Freescale QuadSPI probe failed\n");
955 return ret; 958 return ret;
956} 959}
957 960
@@ -960,8 +963,12 @@ static int fsl_qspi_remove(struct platform_device *pdev)
960 struct fsl_qspi *q = platform_get_drvdata(pdev); 963 struct fsl_qspi *q = platform_get_drvdata(pdev);
961 int i; 964 int i;
962 965
963 for (i = 0; i < q->nor_num; i++) 966 for (i = 0; i < q->nor_num; i++) {
967 /* skip the holes */
968 if (!q->has_second_chip)
969 i *= 2;
964 mtd_device_unregister(&q->mtd[i]); 970 mtd_device_unregister(&q->mtd[i]);
971 }
965 972
966 /* disable the hardware */ 973 /* disable the hardware */
967 writel(QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); 974 writel(QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
@@ -972,6 +979,22 @@ static int fsl_qspi_remove(struct platform_device *pdev)
972 return 0; 979 return 0;
973} 980}
974 981
982static int fsl_qspi_suspend(struct platform_device *pdev, pm_message_t state)
983{
984 return 0;
985}
986
987static int fsl_qspi_resume(struct platform_device *pdev)
988{
989 struct fsl_qspi *q = platform_get_drvdata(pdev);
990
991 fsl_qspi_nor_setup(q);
992 fsl_qspi_set_map_addr(q);
993 fsl_qspi_nor_setup_last(q);
994
995 return 0;
996}
997
975static struct platform_driver fsl_qspi_driver = { 998static struct platform_driver fsl_qspi_driver = {
976 .driver = { 999 .driver = {
977 .name = "fsl-quadspi", 1000 .name = "fsl-quadspi",
@@ -980,6 +1003,8 @@ static struct platform_driver fsl_qspi_driver = {
980 }, 1003 },
981 .probe = fsl_qspi_probe, 1004 .probe = fsl_qspi_probe,
982 .remove = fsl_qspi_remove, 1005 .remove = fsl_qspi_remove,
1006 .suspend = fsl_qspi_suspend,
1007 .resume = fsl_qspi_resume,
983}; 1008};
984module_platform_driver(fsl_qspi_driver); 1009module_platform_driver(fsl_qspi_driver);
985 1010
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 0f8ec3c2d015..b6a5a0c269e1 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -538,6 +538,7 @@ static const struct spi_device_id spi_nor_ids[] = {
538 /* GigaDevice */ 538 /* GigaDevice */
539 { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) }, 539 { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
540 { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) }, 540 { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
541 { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, SECT_4K) },
541 542
542 /* Intel/Numonyx -- xxxs33b */ 543 /* Intel/Numonyx -- xxxs33b */
543 { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, 544 { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
@@ -560,14 +561,14 @@ static const struct spi_device_id spi_nor_ids[] = {
560 { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) }, 561 { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
561 562
562 /* Micron */ 563 /* Micron */
563 { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) }, 564 { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
564 { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) }, 565 { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SPI_NOR_QUAD_READ) },
565 { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) }, 566 { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
566 { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) }, 567 { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
567 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) }, 568 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
568 { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K) }, 569 { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
569 { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, USE_FSR) }, 570 { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
570 { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, USE_FSR) }, 571 { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
571 572
572 /* PMC */ 573 /* PMC */
573 { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) }, 574 { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
@@ -891,6 +892,45 @@ static int spansion_quad_enable(struct spi_nor *nor)
891 return 0; 892 return 0;
892} 893}
893 894
895static int micron_quad_enable(struct spi_nor *nor)
896{
897 int ret;
898 u8 val;
899
900 ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
901 if (ret < 0) {
902 dev_err(nor->dev, "error %d reading EVCR\n", ret);
903 return ret;
904 }
905
906 write_enable(nor);
907
908 /* set EVCR, enable quad I/O */
909 nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
910 ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1, 0);
911 if (ret < 0) {
912 dev_err(nor->dev, "error while writing EVCR register\n");
913 return ret;
914 }
915
916 ret = spi_nor_wait_till_ready(nor);
917 if (ret)
918 return ret;
919
920 /* read EVCR and check it */
921 ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
922 if (ret < 0) {
923 dev_err(nor->dev, "error %d reading EVCR\n", ret);
924 return ret;
925 }
926 if (val & EVCR_QUAD_EN_MICRON) {
927 dev_err(nor->dev, "Micron EVCR Quad bit not clear\n");
928 return -EINVAL;
929 }
930
931 return 0;
932}
933
894static int set_quad_mode(struct spi_nor *nor, struct flash_info *info) 934static int set_quad_mode(struct spi_nor *nor, struct flash_info *info)
895{ 935{
896 int status; 936 int status;
@@ -903,6 +943,13 @@ static int set_quad_mode(struct spi_nor *nor, struct flash_info *info)
903 return -EINVAL; 943 return -EINVAL;
904 } 944 }
905 return status; 945 return status;
946 case CFI_MFR_ST:
947 status = micron_quad_enable(nor);
948 if (status) {
949 dev_err(nor->dev, "Micron quad-read not enabled\n");
950 return -EINVAL;
951 }
952 return status;
906 default: 953 default:
907 status = spansion_quad_enable(nor); 954 status = spansion_quad_enable(nor);
908 if (status) { 955 if (status) {
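
[Note on polarity: in Micron's Enhanced Volatile Configuration Register the quad bit is active-low, so micron_quad_enable() clears EVCR_QUAD_EN_MICRON to switch quad I/O on and treats a still-set bit as failure. Reduced to the two key lines:

        nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;  /* 0 => quad I/O enabled */
        /* ... write_enable, write_reg, wait, re-read EVCR ... */
        if (val & EVCR_QUAD_EN_MICRON)                 /* still 1 => did not stick */
                return -EINVAL;
]
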
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 84673ebcf428..df51d6025a90 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -157,7 +157,7 @@ config IPVLAN
157 making it transparent to the connected L2 switch. 157 making it transparent to the connected L2 switch.
158 158
159 Ipvlan devices can be added using the "ip" command from the 159 Ipvlan devices can be added using the "ip" command from the
160 iproute2 package starting with the iproute2-X.Y.ZZ release: 160 iproute2 package starting with the iproute2-3.19 release:
161 161
162 "ip link add link <main-dev> [ NAME ] type ipvlan" 162 "ip link add link <main-dev> [ NAME ] type ipvlan"
163 163
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index 4ce6ca5f3d36..dc6b78e5342f 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -40,7 +40,7 @@ config DEV_APPLETALK
40 40
41config LTPC 41config LTPC
42 tristate "Apple/Farallon LocalTalk PC support" 42 tristate "Apple/Farallon LocalTalk PC support"
43 depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API 43 depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API && VIRT_TO_BUS
44 help 44 help
45 This allows you to use the AppleTalk PC card to connect to LocalTalk 45 This allows you to use the AppleTalk PC card to connect to LocalTalk
46 networks. The card is also known as the Farallon PhoneNet PC card. 46 networks. The card is also known as the Farallon PhoneNet PC card.
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 0f217e99904f..22e2ebf31333 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -107,8 +107,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \
107{ \ 107{ \
108 u32 indir, dir; \ 108 u32 indir, dir; \
109 spin_lock(&priv->indir_lock); \ 109 spin_lock(&priv->indir_lock); \
110 indir = reg_readl(priv, REG_DIR_DATA_READ); \
111 dir = __raw_readl(priv->name + off); \ 110 dir = __raw_readl(priv->name + off); \
111 indir = reg_readl(priv, REG_DIR_DATA_READ); \
112 spin_unlock(&priv->indir_lock); \ 112 spin_unlock(&priv->indir_lock); \
113 return (u64)indir << 32 | dir; \ 113 return (u64)indir << 32 | dir; \
114} \ 114} \
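
[Note: the swap matters because the two halves of the 64-bit counter are not independent: by the register naming and the direction of the fix, reading the directly mapped low word is what snapshots the upper word into REG_DIR_DATA_READ, so the direct read must come first while the lock is held — that latching behaviour is inferred, not spelled out in the diff:

        spin_lock(&priv->indir_lock);
        dir   = __raw_readl(priv->name + off);      /* low 32 bits; assumed to
                                                       latch the upper half */
        indir = reg_readl(priv, REG_DIR_DATA_READ); /* latched upper 32 bits */
        spin_unlock(&priv->indir_lock);
        return (u64)indir << 32 | dir;
]
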
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 7769c05543f1..ec6eac1f8c95 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -484,11 +484,8 @@ static int axnet_open(struct net_device *dev)
484 link->open++; 484 link->open++;
485 485
486 info->link_status = 0x00; 486 info->link_status = 0x00;
487 init_timer(&info->watchdog); 487 setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
488 info->watchdog.function = ei_watchdog; 488 mod_timer(&info->watchdog, jiffies + HZ);
489 info->watchdog.data = (u_long)dev;
490 info->watchdog.expires = jiffies + HZ;
491 add_timer(&info->watchdog);
492 489
493 return ax_open(dev); 490 return ax_open(dev);
494} /* axnet_open */ 491} /* axnet_open */
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 9fb7b9d4fd6c..2777289a26c0 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -918,11 +918,8 @@ static int pcnet_open(struct net_device *dev)
918 918
919 info->phy_id = info->eth_phy; 919 info->phy_id = info->eth_phy;
920 info->link_status = 0x00; 920 info->link_status = 0x00;
921 init_timer(&info->watchdog); 921 setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
922 info->watchdog.function = ei_watchdog; 922 mod_timer(&info->watchdog, jiffies + HZ);
923 info->watchdog.data = (u_long)dev;
924 info->watchdog.expires = jiffies + HZ;
925 add_timer(&info->watchdog);
926 923
927 return ei_open(dev); 924 return ei_open(dev);
928} /* pcnet_open */ 925} /* pcnet_open */
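
[Note: both converted drivers use the same two-for-five replacement; setup_timer() plus mod_timer() is exactly equivalent to the open-coded sequence it replaces:

        /* before */
        init_timer(&info->watchdog);
        info->watchdog.function = ei_watchdog;
        info->watchdog.data     = (u_long)dev;
        info->watchdog.expires  = jiffies + HZ;
        add_timer(&info->watchdog);

        /* after: one call fills function/data, one arms the timer */
        setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
        mod_timer(&info->watchdog, jiffies + HZ);
]
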
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index a1ee261bff5c..fd9296a5014d 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -376,7 +376,8 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
376 u16 pktlength; 376 u16 pktlength;
377 u16 pktstatus; 377 u16 pktstatus;
378 378
379 while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) { 379 while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) &&
380 (count < limit)) {
380 pktstatus = rxstatus >> 16; 381 pktstatus = rxstatus >> 16;
381 pktlength = rxstatus & 0xffff; 382 pktlength = rxstatus & 0xffff;
382 383
@@ -491,28 +492,27 @@ static int tse_poll(struct napi_struct *napi, int budget)
491 struct altera_tse_private *priv = 492 struct altera_tse_private *priv =
492 container_of(napi, struct altera_tse_private, napi); 493 container_of(napi, struct altera_tse_private, napi);
493 int rxcomplete = 0; 494 int rxcomplete = 0;
494 int txcomplete = 0;
495 unsigned long int flags; 495 unsigned long int flags;
496 496
497 txcomplete = tse_tx_complete(priv); 497 tse_tx_complete(priv);
498 498
499 rxcomplete = tse_rx(priv, budget); 499 rxcomplete = tse_rx(priv, budget);
500 500
501 if (rxcomplete >= budget || txcomplete > 0) 501 if (rxcomplete < budget) {
502 return rxcomplete;
503 502
504 napi_gro_flush(napi, false); 503 napi_gro_flush(napi, false);
505 __napi_complete(napi); 504 __napi_complete(napi);
506 505
507 netdev_dbg(priv->dev, 506 netdev_dbg(priv->dev,
508 "NAPI Complete, did %d packets with budget %d\n", 507 "NAPI Complete, did %d packets with budget %d\n",
509 txcomplete+rxcomplete, budget); 508 rxcomplete, budget);
510 509
511 spin_lock_irqsave(&priv->rxdma_irq_lock, flags); 510 spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
512 priv->dmaops->enable_rxirq(priv); 511 priv->dmaops->enable_rxirq(priv);
513 priv->dmaops->enable_txirq(priv); 512 priv->dmaops->enable_txirq(priv);
514 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); 513 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
515 return rxcomplete + txcomplete; 514 }
515 return rxcomplete;
516} 516}
517 517
518/* DMA TX & RX FIFO interrupt routing 518/* DMA TX & RX FIFO interrupt routing
@@ -521,7 +521,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
521{ 521{
522 struct net_device *dev = dev_id; 522 struct net_device *dev = dev_id;
523 struct altera_tse_private *priv; 523 struct altera_tse_private *priv;
524 unsigned long int flags;
525 524
526 if (unlikely(!dev)) { 525 if (unlikely(!dev)) {
527 pr_err("%s: invalid dev pointer\n", __func__); 526 pr_err("%s: invalid dev pointer\n", __func__);
@@ -529,20 +528,20 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
529 } 528 }
530 priv = netdev_priv(dev); 529 priv = netdev_priv(dev);
531 530
532 /* turn off desc irqs and enable napi rx */ 531 spin_lock(&priv->rxdma_irq_lock);
533 spin_lock_irqsave(&priv->rxdma_irq_lock, flags); 532 /* reset IRQs */
533 priv->dmaops->clear_rxirq(priv);
534 priv->dmaops->clear_txirq(priv);
535 spin_unlock(&priv->rxdma_irq_lock);
534 536
535 if (likely(napi_schedule_prep(&priv->napi))) { 537 if (likely(napi_schedule_prep(&priv->napi))) {
538 spin_lock(&priv->rxdma_irq_lock);
536 priv->dmaops->disable_rxirq(priv); 539 priv->dmaops->disable_rxirq(priv);
537 priv->dmaops->disable_txirq(priv); 540 priv->dmaops->disable_txirq(priv);
541 spin_unlock(&priv->rxdma_irq_lock);
538 __napi_schedule(&priv->napi); 542 __napi_schedule(&priv->napi);
539 } 543 }
540 544
541 /* reset IRQs */
542 priv->dmaops->clear_rxirq(priv);
543 priv->dmaops->clear_txirq(priv);
544
545 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
546 545
547 return IRQ_HANDLED; 546 return IRQ_HANDLED;
548} 547}
@@ -1407,7 +1406,7 @@ static int altera_tse_probe(struct platform_device *pdev)
1407 } 1406 }
1408 1407
1409 if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", 1408 if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
1410 &priv->rx_fifo_depth)) { 1409 &priv->tx_fifo_depth)) {
1411 dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n"); 1410 dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
1412 ret = -ENXIO; 1411 ret = -ENXIO;
1413 goto err_free_netdev; 1412 goto err_free_netdev;
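
[Note: three separate fixes land in this driver — tse_rx() now honours its limit argument, tse_poll() only completes NAPI and re-enables interrupts when it finished under budget, and the tx-fifo-depth property is finally stored in tx_fifo_depth instead of overwriting rx_fifo_depth. The first two restore the standard NAPI contract, whose canonical shape is sketched below (reenable_rx_tx_irqs() is a hypothetical helper):

        static int poll(struct napi_struct *napi, int budget)
        {
                int done = rx_work(napi, budget);  /* must not exceed budget */

                if (done < budget) {               /* ring drained: stop polling */
                        napi_complete(napi);
                        reenable_rx_tx_irqs();     /* hypothetical helper */
                }
                return done;                       /* done == budget: poll again */
        }
]
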
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index b93d4404d975..885b02b5be07 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -609,6 +609,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
609 } 609 }
610} 610}
611 611
612static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
613{
614 struct xgbe_channel *channel;
615 struct net_device *netdev = pdata->netdev;
616 unsigned int i;
617 int ret;
618
619 ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
620 netdev->name, pdata);
621 if (ret) {
622 netdev_alert(netdev, "error requesting irq %d\n",
623 pdata->dev_irq);
624 return ret;
625 }
626
627 if (!pdata->per_channel_irq)
628 return 0;
629
630 channel = pdata->channel;
631 for (i = 0; i < pdata->channel_count; i++, channel++) {
632 snprintf(channel->dma_irq_name,
633 sizeof(channel->dma_irq_name) - 1,
634 "%s-TxRx-%u", netdev_name(netdev),
635 channel->queue_index);
636
637 ret = devm_request_irq(pdata->dev, channel->dma_irq,
638 xgbe_dma_isr, 0,
639 channel->dma_irq_name, channel);
640 if (ret) {
641 netdev_alert(netdev, "error requesting irq %d\n",
642 channel->dma_irq);
643 goto err_irq;
644 }
645 }
646
647 return 0;
648
649err_irq:
650 /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
651 for (i--, channel--; i < pdata->channel_count; i--, channel--)
652 devm_free_irq(pdata->dev, channel->dma_irq, channel);
653
654 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
655
656 return ret;
657}
658
659static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
660{
661 struct xgbe_channel *channel;
662 unsigned int i;
663
664 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
665
666 if (!pdata->per_channel_irq)
667 return;
668
669 channel = pdata->channel;
670 for (i = 0; i < pdata->channel_count; i++, channel++)
671 devm_free_irq(pdata->dev, channel->dma_irq, channel);
672}
673
612void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) 674void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
613{ 675{
614 struct xgbe_hw_if *hw_if = &pdata->hw_if; 676 struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -810,20 +872,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
810 return -EINVAL; 872 return -EINVAL;
811 } 873 }
812 874
813 phy_stop(pdata->phydev);
814
815 spin_lock_irqsave(&pdata->lock, flags); 875 spin_lock_irqsave(&pdata->lock, flags);
816 876
817 if (caller == XGMAC_DRIVER_CONTEXT) 877 if (caller == XGMAC_DRIVER_CONTEXT)
818 netif_device_detach(netdev); 878 netif_device_detach(netdev);
819 879
820 netif_tx_stop_all_queues(netdev); 880 netif_tx_stop_all_queues(netdev);
821 xgbe_napi_disable(pdata, 0);
822 881
823 /* Powerdown Tx/Rx */
824 hw_if->powerdown_tx(pdata); 882 hw_if->powerdown_tx(pdata);
825 hw_if->powerdown_rx(pdata); 883 hw_if->powerdown_rx(pdata);
826 884
885 xgbe_napi_disable(pdata, 0);
886
887 phy_stop(pdata->phydev);
888
827 pdata->power_down = 1; 889 pdata->power_down = 1;
828 890
829 spin_unlock_irqrestore(&pdata->lock, flags); 891 spin_unlock_irqrestore(&pdata->lock, flags);
@@ -854,14 +916,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
854 916
855 phy_start(pdata->phydev); 917 phy_start(pdata->phydev);
856 918
857 /* Enable Tx/Rx */ 919 xgbe_napi_enable(pdata, 0);
920
858 hw_if->powerup_tx(pdata); 921 hw_if->powerup_tx(pdata);
859 hw_if->powerup_rx(pdata); 922 hw_if->powerup_rx(pdata);
860 923
861 if (caller == XGMAC_DRIVER_CONTEXT) 924 if (caller == XGMAC_DRIVER_CONTEXT)
862 netif_device_attach(netdev); 925 netif_device_attach(netdev);
863 926
864 xgbe_napi_enable(pdata, 0);
865 netif_tx_start_all_queues(netdev); 927 netif_tx_start_all_queues(netdev);
866 928
867 spin_unlock_irqrestore(&pdata->lock, flags); 929 spin_unlock_irqrestore(&pdata->lock, flags);
@@ -875,6 +937,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
875{ 937{
876 struct xgbe_hw_if *hw_if = &pdata->hw_if; 938 struct xgbe_hw_if *hw_if = &pdata->hw_if;
877 struct net_device *netdev = pdata->netdev; 939 struct net_device *netdev = pdata->netdev;
940 int ret;
878 941
879 DBGPR("-->xgbe_start\n"); 942 DBGPR("-->xgbe_start\n");
880 943
@@ -884,17 +947,31 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
884 947
885 phy_start(pdata->phydev); 948 phy_start(pdata->phydev);
886 949
950 xgbe_napi_enable(pdata, 1);
951
952 ret = xgbe_request_irqs(pdata);
953 if (ret)
954 goto err_napi;
955
887 hw_if->enable_tx(pdata); 956 hw_if->enable_tx(pdata);
888 hw_if->enable_rx(pdata); 957 hw_if->enable_rx(pdata);
889 958
890 xgbe_init_tx_timers(pdata); 959 xgbe_init_tx_timers(pdata);
891 960
892 xgbe_napi_enable(pdata, 1);
893 netif_tx_start_all_queues(netdev); 961 netif_tx_start_all_queues(netdev);
894 962
895 DBGPR("<--xgbe_start\n"); 963 DBGPR("<--xgbe_start\n");
896 964
897 return 0; 965 return 0;
966
967err_napi:
968 xgbe_napi_disable(pdata, 1);
969
970 phy_stop(pdata->phydev);
971
972 hw_if->exit(pdata);
973
974 return ret;
898} 975}
899 976
900static void xgbe_stop(struct xgbe_prv_data *pdata) 977static void xgbe_stop(struct xgbe_prv_data *pdata)
@@ -907,16 +984,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
907 984
908 DBGPR("-->xgbe_stop\n"); 985 DBGPR("-->xgbe_stop\n");
909 986
910 phy_stop(pdata->phydev);
911
912 netif_tx_stop_all_queues(netdev); 987 netif_tx_stop_all_queues(netdev);
913 xgbe_napi_disable(pdata, 1);
914 988
915 xgbe_stop_tx_timers(pdata); 989 xgbe_stop_tx_timers(pdata);
916 990
917 hw_if->disable_tx(pdata); 991 hw_if->disable_tx(pdata);
918 hw_if->disable_rx(pdata); 992 hw_if->disable_rx(pdata);
919 993
994 xgbe_free_irqs(pdata);
995
996 xgbe_napi_disable(pdata, 1);
997
998 phy_stop(pdata->phydev);
999
1000 hw_if->exit(pdata);
1001
920 channel = pdata->channel; 1002 channel = pdata->channel;
921 for (i = 0; i < pdata->channel_count; i++, channel++) { 1003 for (i = 0; i < pdata->channel_count; i++, channel++) {
922 if (!channel->tx_ring) 1004 if (!channel->tx_ring)
@@ -931,10 +1013,6 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
931 1013
932static void xgbe_restart_dev(struct xgbe_prv_data *pdata) 1014static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
933{ 1015{
934 struct xgbe_channel *channel;
935 struct xgbe_hw_if *hw_if = &pdata->hw_if;
936 unsigned int i;
937
938 DBGPR("-->xgbe_restart_dev\n"); 1016 DBGPR("-->xgbe_restart_dev\n");
939 1017
940 /* If not running, "restart" will happen on open */ 1018 /* If not running, "restart" will happen on open */
@@ -942,19 +1020,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
942 return; 1020 return;
943 1021
944 xgbe_stop(pdata); 1022 xgbe_stop(pdata);
945 synchronize_irq(pdata->dev_irq);
946 if (pdata->per_channel_irq) {
947 channel = pdata->channel;
948 for (i = 0; i < pdata->channel_count; i++, channel++)
949 synchronize_irq(channel->dma_irq);
950 }
951 1023
952 xgbe_free_tx_data(pdata); 1024 xgbe_free_tx_data(pdata);
953 xgbe_free_rx_data(pdata); 1025 xgbe_free_rx_data(pdata);
954 1026
955 /* Issue software reset to device */
956 hw_if->exit(pdata);
957
958 xgbe_start(pdata); 1027 xgbe_start(pdata);
959 1028
960 DBGPR("<--xgbe_restart_dev\n"); 1029 DBGPR("<--xgbe_restart_dev\n");
@@ -1283,10 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
1283static int xgbe_open(struct net_device *netdev) 1352static int xgbe_open(struct net_device *netdev)
1284{ 1353{
1285 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1354 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1286 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1287 struct xgbe_desc_if *desc_if = &pdata->desc_if; 1355 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1288 struct xgbe_channel *channel = NULL;
1289 unsigned int i = 0;
1290 int ret; 1356 int ret;
1291 1357
1292 DBGPR("-->xgbe_open\n"); 1358 DBGPR("-->xgbe_open\n");
@@ -1329,55 +1395,14 @@ static int xgbe_open(struct net_device *netdev)
1329 INIT_WORK(&pdata->restart_work, xgbe_restart); 1395 INIT_WORK(&pdata->restart_work, xgbe_restart);
1330 INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); 1396 INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
1331 1397
1332 /* Request interrupts */
1333 ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
1334 netdev->name, pdata);
1335 if (ret) {
1336 netdev_alert(netdev, "error requesting irq %d\n",
1337 pdata->dev_irq);
1338 goto err_rings;
1339 }
1340
1341 if (pdata->per_channel_irq) {
1342 channel = pdata->channel;
1343 for (i = 0; i < pdata->channel_count; i++, channel++) {
1344 snprintf(channel->dma_irq_name,
1345 sizeof(channel->dma_irq_name) - 1,
1346 "%s-TxRx-%u", netdev_name(netdev),
1347 channel->queue_index);
1348
1349 ret = devm_request_irq(pdata->dev, channel->dma_irq,
1350 xgbe_dma_isr, 0,
1351 channel->dma_irq_name, channel);
1352 if (ret) {
1353 netdev_alert(netdev,
1354 "error requesting irq %d\n",
1355 channel->dma_irq);
1356 goto err_irq;
1357 }
1358 }
1359 }
1360
1361 ret = xgbe_start(pdata); 1398 ret = xgbe_start(pdata);
1362 if (ret) 1399 if (ret)
1363 goto err_start; 1400 goto err_rings;
1364 1401
1365 DBGPR("<--xgbe_open\n"); 1402 DBGPR("<--xgbe_open\n");
1366 1403
1367 return 0; 1404 return 0;
1368 1405
1369err_start:
1370 hw_if->exit(pdata);
1371
1372err_irq:
1373 if (pdata->per_channel_irq) {
1374 /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
1375 for (i--, channel--; i < pdata->channel_count; i--, channel--)
1376 devm_free_irq(pdata->dev, channel->dma_irq, channel);
1377 }
1378
1379 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
1380
1381err_rings: 1406err_rings:
1382 desc_if->free_ring_resources(pdata); 1407 desc_if->free_ring_resources(pdata);
1383 1408
@@ -1399,30 +1424,16 @@ err_phy_init:
1399static int xgbe_close(struct net_device *netdev) 1424static int xgbe_close(struct net_device *netdev)
1400{ 1425{
1401 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1426 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1402 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1403 struct xgbe_desc_if *desc_if = &pdata->desc_if; 1427 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1404 struct xgbe_channel *channel;
1405 unsigned int i;
1406 1428
1407 DBGPR("-->xgbe_close\n"); 1429 DBGPR("-->xgbe_close\n");
1408 1430
1409 /* Stop the device */ 1431 /* Stop the device */
1410 xgbe_stop(pdata); 1432 xgbe_stop(pdata);
1411 1433
1412 /* Issue software reset to device */
1413 hw_if->exit(pdata);
1414
1415 /* Free the ring descriptors and buffers */ 1434 /* Free the ring descriptors and buffers */
1416 desc_if->free_ring_resources(pdata); 1435 desc_if->free_ring_resources(pdata);
1417 1436
1418 /* Release the interrupts */
1419 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
1420 if (pdata->per_channel_irq) {
1421 channel = pdata->channel;
1422 for (i = 0; i < pdata->channel_count; i++, channel++)
1423 devm_free_irq(pdata->dev, channel->dma_irq, channel);
1424 }
1425
1426 /* Free the channel and ring structures */ 1437 /* Free the channel and ring structures */
1427 xgbe_free_channels(pdata); 1438 xgbe_free_channels(pdata);
1428 1439
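
[Note on the net effect of these xgbe hunks: bring-up and tear-down are now strict mirrors, and the software reset moved from the callers (xgbe_close(), xgbe_restart_dev()) into xgbe_stop() itself; restart can also drop its synchronize_irq() calls because free_irq() already waits for in-flight handlers. Condensed:

        /* xgbe_start: phy_start -> napi_enable -> request IRQs ->
         *             enable Tx/Rx -> timers -> wake queues
         * xgbe_stop:  stop queues -> timers -> disable Tx/Rx ->
         *             free IRQs (implies synchronize_irq) ->
         *             napi_disable -> phy_stop -> hw_if->exit() */
]
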
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 5b308a4a4d0e..783543ad1fcf 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -274,9 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
274 /* RBUF misc statistics */ 274 /* RBUF misc statistics */
275 STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), 275 STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
276 STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), 276 STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
277 STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), 277 STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
278 STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed), 278 STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
279 STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed), 279 STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
280}; 280};
281 281
282#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) 282#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
@@ -345,6 +345,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
345 s = &bcm_sysport_gstrings_stats[i]; 345 s = &bcm_sysport_gstrings_stats[i];
346 switch (s->type) { 346 switch (s->type) {
347 case BCM_SYSPORT_STAT_NETDEV: 347 case BCM_SYSPORT_STAT_NETDEV:
348 case BCM_SYSPORT_STAT_SOFT:
348 continue; 349 continue;
349 case BCM_SYSPORT_STAT_MIB_RX: 350 case BCM_SYSPORT_STAT_MIB_RX:
350 case BCM_SYSPORT_STAT_MIB_TX: 351 case BCM_SYSPORT_STAT_MIB_TX:
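
[Note: the new SOFT type marks counters the driver maintains in software (allocation and DMA-mapping failures); the periodic MIB refresh must skip them, since "reading them back from hardware" would clobber the software counts. The dispatch therefore treats them like the netdev stats — the same scheme is mirrored in the bcmgenet hunks below:

        switch (s->type) {
        case BCM_SYSPORT_STAT_NETDEV:
        case BCM_SYSPORT_STAT_SOFT:
                continue;       /* counted by the driver, never read from MIB */
        case BCM_SYSPORT_STAT_MIB_RX:
        case BCM_SYSPORT_STAT_MIB_TX:
                /* hardware-backed: refreshed from the MIB registers */
                break;
        }
]
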
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index fc19417d82a5..7e3d87a88c76 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -570,6 +570,7 @@ enum bcm_sysport_stat_type {
570 BCM_SYSPORT_STAT_RUNT, 570 BCM_SYSPORT_STAT_RUNT,
571 BCM_SYSPORT_STAT_RXCHK, 571 BCM_SYSPORT_STAT_RXCHK,
572 BCM_SYSPORT_STAT_RBUF, 572 BCM_SYSPORT_STAT_RBUF,
573 BCM_SYSPORT_STAT_SOFT,
573}; 574};
574 575
575/* Macros to help define ethtool statistics */ 576/* Macros to help define ethtool statistics */
@@ -590,6 +591,7 @@ enum bcm_sysport_stat_type {
590#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX) 591#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
591#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX) 592#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
592#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT) 593#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
594#define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT)
593 595
594#define STAT_RXCHK(str, m, ofs) { \ 596#define STAT_RXCHK(str, m, ofs) { \
595 .stat_string = str, \ 597 .stat_string = str, \
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 51300532ec26..84feb241d60b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -487,6 +487,7 @@ enum bcmgenet_stat_type {
487 BCMGENET_STAT_MIB_TX, 487 BCMGENET_STAT_MIB_TX,
488 BCMGENET_STAT_RUNT, 488 BCMGENET_STAT_RUNT,
489 BCMGENET_STAT_MISC, 489 BCMGENET_STAT_MISC,
490 BCMGENET_STAT_SOFT,
490}; 491};
491 492
492struct bcmgenet_stats { 493struct bcmgenet_stats {
@@ -515,6 +516,7 @@ struct bcmgenet_stats {
515#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) 516#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
516#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) 517#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
517#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) 518#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
519#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
518 520
519#define STAT_GENET_MISC(str, m, offset) { \ 521#define STAT_GENET_MISC(str, m, offset) { \
520 .stat_string = str, \ 522 .stat_string = str, \
@@ -614,9 +616,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
614 UMAC_RBUF_OVFL_CNT), 616 UMAC_RBUF_OVFL_CNT),
615 STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), 617 STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
616 STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), 618 STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
617 STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), 619 STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
618 STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed), 620 STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
619 STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed), 621 STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
620}; 622};
621 623
622#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) 624#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -668,6 +670,7 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
668 s = &bcmgenet_gstrings_stats[i]; 670 s = &bcmgenet_gstrings_stats[i];
669 switch (s->type) { 671 switch (s->type) {
670 case BCMGENET_STAT_NETDEV: 672 case BCMGENET_STAT_NETDEV:
673 case BCMGENET_STAT_SOFT:
671 continue; 674 continue;
672 case BCMGENET_STAT_MIB_RX: 675 case BCMGENET_STAT_MIB_RX:
673 case BCMGENET_STAT_MIB_TX: 676 case BCMGENET_STAT_MIB_TX:
@@ -971,13 +974,14 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
971} 974}
972 975
973/* Unlocked version of the reclaim routine */ 976/* Unlocked version of the reclaim routine */
974static void __bcmgenet_tx_reclaim(struct net_device *dev, 977static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
975 struct bcmgenet_tx_ring *ring) 978 struct bcmgenet_tx_ring *ring)
976{ 979{
977 struct bcmgenet_priv *priv = netdev_priv(dev); 980 struct bcmgenet_priv *priv = netdev_priv(dev);
978 int last_tx_cn, last_c_index, num_tx_bds; 981 int last_tx_cn, last_c_index, num_tx_bds;
979 struct enet_cb *tx_cb_ptr; 982 struct enet_cb *tx_cb_ptr;
980 struct netdev_queue *txq; 983 struct netdev_queue *txq;
984 unsigned int pkts_compl = 0;
981 unsigned int bds_compl; 985 unsigned int bds_compl;
982 unsigned int c_index; 986 unsigned int c_index;
983 987
@@ -1005,6 +1009,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
1005 tx_cb_ptr = ring->cbs + last_c_index; 1009 tx_cb_ptr = ring->cbs + last_c_index;
1006 bds_compl = 0; 1010 bds_compl = 0;
1007 if (tx_cb_ptr->skb) { 1011 if (tx_cb_ptr->skb) {
1012 pkts_compl++;
1008 bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1; 1013 bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
1009 dev->stats.tx_bytes += tx_cb_ptr->skb->len; 1014 dev->stats.tx_bytes += tx_cb_ptr->skb->len;
1010 dma_unmap_single(&dev->dev, 1015 dma_unmap_single(&dev->dev,
@@ -1028,23 +1033,45 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
1028 last_c_index &= (num_tx_bds - 1); 1033 last_c_index &= (num_tx_bds - 1);
1029 } 1034 }
1030 1035
1031 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) 1036 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
1032 ring->int_disable(priv, ring); 1037 if (netif_tx_queue_stopped(txq))
1033 1038 netif_tx_wake_queue(txq);
1034 if (netif_tx_queue_stopped(txq)) 1039 }
1035 netif_tx_wake_queue(txq);
1036 1040
1037 ring->c_index = c_index; 1041 ring->c_index = c_index;
1042
1043 return pkts_compl;
1038} 1044}
1039 1045
1040static void bcmgenet_tx_reclaim(struct net_device *dev, 1046static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
1041 struct bcmgenet_tx_ring *ring) 1047 struct bcmgenet_tx_ring *ring)
1042{ 1048{
1049 unsigned int released;
1043 unsigned long flags; 1050 unsigned long flags;
1044 1051
1045 spin_lock_irqsave(&ring->lock, flags); 1052 spin_lock_irqsave(&ring->lock, flags);
1046 __bcmgenet_tx_reclaim(dev, ring); 1053 released = __bcmgenet_tx_reclaim(dev, ring);
1047 spin_unlock_irqrestore(&ring->lock, flags); 1054 spin_unlock_irqrestore(&ring->lock, flags);
1055
1056 return released;
1057}
1058
1059static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
1060{
1061 struct bcmgenet_tx_ring *ring =
1062 container_of(napi, struct bcmgenet_tx_ring, napi);
1063 unsigned int work_done = 0;
1064
1065 work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
1066
1067 if (work_done == 0) {
1068 napi_complete(napi);
1069 ring->int_enable(ring->priv, ring);
1070
1071 return 0;
1072 }
1073
1074 return budget;
1048} 1075}
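
[Note: TX reclaim moves out of the shared RX poll loop into one NAPI context per TX ring; bcmgenet_tx_poll() recovers its ring from the napi pointer with container_of(), which is why init_tx_ring now records ring->priv and registers the embedded napi. The retrieval pattern in isolation:

        struct bcmgenet_tx_ring {
                struct napi_struct napi;   /* embedded, one instance per ring */
                struct bcmgenet_priv *priv;
                /* ... */
        };

        /* inside the poll callback: recover the ring from its member */
        struct bcmgenet_tx_ring *ring =
                container_of(napi, struct bcmgenet_tx_ring, napi);
]
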
1049 1076
1050static void bcmgenet_tx_reclaim_all(struct net_device *dev) 1077static void bcmgenet_tx_reclaim_all(struct net_device *dev)
@@ -1302,10 +1329,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1302 bcmgenet_tdma_ring_writel(priv, ring->index, 1329 bcmgenet_tdma_ring_writel(priv, ring->index,
1303 ring->prod_index, TDMA_PROD_INDEX); 1330 ring->prod_index, TDMA_PROD_INDEX);
1304 1331
1305 if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) { 1332 if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
1306 netif_tx_stop_queue(txq); 1333 netif_tx_stop_queue(txq);
1307 ring->int_enable(priv, ring);
1308 }
1309 1334
1310out: 1335out:
1311 spin_unlock_irqrestore(&ring->lock, flags); 1336 spin_unlock_irqrestore(&ring->lock, flags);
@@ -1621,6 +1646,7 @@ static int init_umac(struct bcmgenet_priv *priv)
1621 struct device *kdev = &priv->pdev->dev; 1646 struct device *kdev = &priv->pdev->dev;
1622 int ret; 1647 int ret;
1623 u32 reg, cpu_mask_clear; 1648 u32 reg, cpu_mask_clear;
1649 int index;
1624 1650
1625 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); 1651 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
1626 1652
@@ -1647,7 +1673,7 @@ static int init_umac(struct bcmgenet_priv *priv)
1647 1673
1648 bcmgenet_intr_disable(priv); 1674 bcmgenet_intr_disable(priv);
1649 1675
1650 cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE; 1676 cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
1651 1677
1652 dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); 1678 dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
1653 1679
@@ -1674,6 +1700,10 @@ static int init_umac(struct bcmgenet_priv *priv)
1674 1700
1675 bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR); 1701 bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
1676 1702
1703 for (index = 0; index < priv->hw_params->tx_queues; index++)
1704 bcmgenet_intrl2_1_writel(priv, (1 << index),
1705 INTRL2_CPU_MASK_CLEAR);
1706
1677 /* Enable rx/tx engine.*/ 1707 /* Enable rx/tx engine.*/
1678 dev_dbg(kdev, "done init umac\n"); 1708 dev_dbg(kdev, "done init umac\n");
1679 1709
@@ -1690,6 +1720,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
1690 u32 flow_period_val = 0; 1720 u32 flow_period_val = 0;
1691 1721
1692 spin_lock_init(&ring->lock); 1722 spin_lock_init(&ring->lock);
1723 ring->priv = priv;
1724 netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
1693 ring->index = index; 1725 ring->index = index;
1694 if (index == DESC_INDEX) { 1726 if (index == DESC_INDEX) {
1695 ring->queue = 0; 1727 ring->queue = 0;
@@ -1732,6 +1764,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
1732 TDMA_WRITE_PTR); 1764 TDMA_WRITE_PTR);
1733 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, 1765 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
1734 DMA_END_ADDR); 1766 DMA_END_ADDR);
1767
1768 napi_enable(&ring->napi);
1769}
1770
1771static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
1772 unsigned int index)
1773{
1774 struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
1775
1776 napi_disable(&ring->napi);
1777 netif_napi_del(&ring->napi);
1735} 1778}
1736 1779
1737/* Initialize a RDMA ring */ 1780/* Initialize a RDMA ring */
@@ -1896,7 +1939,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
1896 return ret; 1939 return ret;
1897} 1940}
1898 1941
1899static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) 1942static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
1900{ 1943{
1901 int i; 1944 int i;
1902 1945
@@ -1915,6 +1958,18 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
1915 kfree(priv->tx_cbs); 1958 kfree(priv->tx_cbs);
1916} 1959}
1917 1960
1961static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
1962{
1963 int i;
1964
1965 bcmgenet_fini_tx_ring(priv, DESC_INDEX);
1966
1967 for (i = 0; i < priv->hw_params->tx_queues; i++)
1968 bcmgenet_fini_tx_ring(priv, i);
1969
1970 __bcmgenet_fini_dma(priv);
1971}
1972
1918/* init_edma: Initialize DMA control register */ 1973/* init_edma: Initialize DMA control register */
1919static int bcmgenet_init_dma(struct bcmgenet_priv *priv) 1974static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
1920{ 1975{
@@ -1943,7 +1998,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
1943 priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), 1998 priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
1944 GFP_KERNEL); 1999 GFP_KERNEL);
1945 if (!priv->tx_cbs) { 2000 if (!priv->tx_cbs) {
1946 bcmgenet_fini_dma(priv); 2001 __bcmgenet_fini_dma(priv);
1947 return -ENOMEM; 2002 return -ENOMEM;
1948 } 2003 }
1949 2004
@@ -1965,9 +2020,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget)
1965 struct bcmgenet_priv, napi); 2020 struct bcmgenet_priv, napi);
1966 unsigned int work_done; 2021 unsigned int work_done;
1967 2022
1968 /* tx reclaim */
1969 bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
1970
1971 work_done = bcmgenet_desc_rx(priv, budget); 2023 work_done = bcmgenet_desc_rx(priv, budget);
1972 2024
1973 /* Advancing our consumer index*/ 2025 /* Advancing our consumer index*/
@@ -2012,28 +2064,34 @@ static void bcmgenet_irq_task(struct work_struct *work)
2012static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) 2064static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
2013{ 2065{
2014 struct bcmgenet_priv *priv = dev_id; 2066 struct bcmgenet_priv *priv = dev_id;
2067 struct bcmgenet_tx_ring *ring;
2015 unsigned int index; 2068 unsigned int index;
2016 2069
2017 /* Save irq status for bottom-half processing. */ 2070 /* Save irq status for bottom-half processing. */
2018 priv->irq1_stat = 2071 priv->irq1_stat =
2019 bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & 2072 bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
2020 ~priv->int1_mask; 2073 ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
2021 /* clear interrupts */ 2074 /* clear interrupts */
2022 bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); 2075 bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
2023 2076
2024 netif_dbg(priv, intr, priv->dev, 2077 netif_dbg(priv, intr, priv->dev,
2025 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); 2078 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
2079
2026 /* Check the MBDONE interrupts. 2080 /* Check the MBDONE interrupts.
2027 * packet is done, reclaim descriptors 2081 * packet is done, reclaim descriptors
2028 */ 2082 */
2029 if (priv->irq1_stat & 0x0000ffff) { 2083 for (index = 0; index < priv->hw_params->tx_queues; index++) {
2030 index = 0; 2084 if (!(priv->irq1_stat & BIT(index)))
2031 for (index = 0; index < 16; index++) { 2085 continue;
2032 if (priv->irq1_stat & (1 << index)) 2086
2033 bcmgenet_tx_reclaim(priv->dev, 2087 ring = &priv->tx_rings[index];
2034 &priv->tx_rings[index]); 2088
2089 if (likely(napi_schedule_prep(&ring->napi))) {
2090 ring->int_disable(priv, ring);
2091 __napi_schedule(&ring->napi);
2035 } 2092 }
2036 } 2093 }
2094
2037 return IRQ_HANDLED; 2095 return IRQ_HANDLED;
2038} 2096}
2039 2097
@@ -2065,8 +2123,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
2065 } 2123 }
2066 if (priv->irq0_stat & 2124 if (priv->irq0_stat &
2067 (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) { 2125 (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
2068 /* Tx reclaim */ 2126 struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
2069 bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); 2127
2128 if (likely(napi_schedule_prep(&ring->napi))) {
2129 ring->int_disable(priv, ring);
2130 __napi_schedule(&ring->napi);
2131 }
2070 } 2132 }
2071 if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | 2133 if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
2072 UMAC_IRQ_PHY_DET_F | 2134 UMAC_IRQ_PHY_DET_F |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 3a8a90f95365..016bd12bf493 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -520,6 +520,7 @@ struct bcmgenet_hw_params {
520 520
521struct bcmgenet_tx_ring { 521struct bcmgenet_tx_ring {
522 spinlock_t lock; /* ring lock */ 522 spinlock_t lock; /* ring lock */
523 struct napi_struct napi; /* NAPI per tx queue */
523 unsigned int index; /* ring index */ 524 unsigned int index; /* ring index */
524 unsigned int queue; /* queue index */ 525 unsigned int queue; /* queue index */
525 struct enet_cb *cbs; /* tx ring buffer control block*/ 526 struct enet_cb *cbs; /* tx ring buffer control block*/
@@ -534,6 +535,7 @@ struct bcmgenet_tx_ring {
534 struct bcmgenet_tx_ring *); 535 struct bcmgenet_tx_ring *);
535 void (*int_disable)(struct bcmgenet_priv *priv, 536 void (*int_disable)(struct bcmgenet_priv *priv,
536 struct bcmgenet_tx_ring *); 537 struct bcmgenet_tx_ring *);
538 struct bcmgenet_priv *priv;
537}; 539};
538 540
539/* device context */ 541/* device context */
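
The bcmgenet hunks above move TX reclaim out of the device-level NAPI handler and the hard-IRQ paths into a per-ring NAPI context: the ISRs now mask the ring interrupt and schedule the ring's NAPI instance, and bcmgenet_tx_poll() re-enables the interrupt only once a poll pass reclaims nothing. Below is a minimal standalone sketch of that contract; the names and the simulated reclaim are illustrative, not driver code.

	/* Minimal model of the per-ring TX-completion NAPI contract: the ISR
	 * disables the ring interrupt and schedules polling; the poll loop
	 * keeps running while reclaim makes progress, and only re-enables
	 * the interrupt once a pass completes no work. */
	#include <stdbool.h>
	#include <stdio.h>

	struct ring {
		int pending;       /* descriptors awaiting reclaim */
		bool irq_enabled;  /* models int_enable()/int_disable() */
	};

	static int reclaim(struct ring *r, int batch)
	{
		int done = r->pending < batch ? r->pending : batch;
		r->pending -= done;
		return done;
	}

	static int poll(struct ring *r, int budget)
	{
		int work = reclaim(r, budget);
		if (work == 0) {           /* nothing left: stop polling ... */
			r->irq_enabled = true; /* ... and let the IRQ rearm us */
			return 0;
		}
		return budget;             /* more work: stay on the poll list */
	}

	int main(void)
	{
		struct ring r = { .pending = 70, .irq_enabled = false };
		while (poll(&r, 64) != 0)  /* the kernel would reschedule NAPI */
			;
		printf("pending=%d irq=%d\n", r.pending, r.irq_enabled);
		return 0;
	}
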
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 9062a8434246..c308429dd9c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -35,10 +35,10 @@ static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
35} 35}
36 36
37static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr, 37static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
38 int addr_len) 38 u8 v6)
39{ 39{
40 return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) : 40 return v6 ? ipv6_clip_hash(ctbl, addr) :
41 ipv6_clip_hash(ctbl, addr); 41 ipv4_clip_hash(ctbl, addr);
42} 42}
43 43
44static int clip6_get_mbox(const struct net_device *dev, 44static int clip6_get_mbox(const struct net_device *dev,
@@ -78,23 +78,22 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
78 struct clip_entry *ce, *cte; 78 struct clip_entry *ce, *cte;
79 u32 *addr = (u32 *)lip; 79 u32 *addr = (u32 *)lip;
80 int hash; 80 int hash;
81 int addr_len; 81 int ret = -1;
82 int ret = 0;
83 82
84 if (!ctbl) 83 if (!ctbl)
85 return 0; 84 return 0;
86 85
87 if (v6) 86 hash = clip_addr_hash(ctbl, addr, v6);
88 addr_len = 16;
89 else
90 addr_len = 4;
91
92 hash = clip_addr_hash(ctbl, addr, addr_len);
93 87
94 read_lock_bh(&ctbl->lock); 88 read_lock_bh(&ctbl->lock);
95 list_for_each_entry(cte, &ctbl->hash_list[hash], list) { 89 list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
96 if (addr_len == cte->addr_len && 90 if (cte->addr6.sin6_family == AF_INET6 && v6)
97 memcmp(lip, cte->addr, cte->addr_len) == 0) { 91 ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
92 sizeof(struct in6_addr));
93 else if (cte->addr.sin_family == AF_INET && !v6)
94 ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
95 sizeof(struct in_addr));
96 if (!ret) {
98 ce = cte; 97 ce = cte;
99 read_unlock_bh(&ctbl->lock); 98 read_unlock_bh(&ctbl->lock);
100 goto found; 99 goto found;
@@ -111,15 +110,20 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
111 spin_lock_init(&ce->lock); 110 spin_lock_init(&ce->lock);
112 atomic_set(&ce->refcnt, 0); 111 atomic_set(&ce->refcnt, 0);
113 atomic_dec(&ctbl->nfree); 112 atomic_dec(&ctbl->nfree);
114 ce->addr_len = addr_len;
115 memcpy(ce->addr, lip, addr_len);
116 list_add_tail(&ce->list, &ctbl->hash_list[hash]); 113 list_add_tail(&ce->list, &ctbl->hash_list[hash]);
117 if (v6) { 114 if (v6) {
115 ce->addr6.sin6_family = AF_INET6;
116 memcpy(ce->addr6.sin6_addr.s6_addr,
117 lip, sizeof(struct in6_addr));
118 ret = clip6_get_mbox(dev, (const struct in6_addr *)lip); 118 ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
119 if (ret) { 119 if (ret) {
120 write_unlock_bh(&ctbl->lock); 120 write_unlock_bh(&ctbl->lock);
121 return ret; 121 return ret;
122 } 122 }
123 } else {
124 ce->addr.sin_family = AF_INET;
125 memcpy((char *)(&ce->addr.sin_addr), lip,
126 sizeof(struct in_addr));
123 } 127 }
124 } else { 128 } else {
125 write_unlock_bh(&ctbl->lock); 129 write_unlock_bh(&ctbl->lock);
@@ -140,19 +144,19 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
140 struct clip_entry *ce, *cte; 144 struct clip_entry *ce, *cte;
141 u32 *addr = (u32 *)lip; 145 u32 *addr = (u32 *)lip;
142 int hash; 146 int hash;
143 int addr_len; 147 int ret = -1;
144
145 if (v6)
146 addr_len = 16;
147 else
148 addr_len = 4;
149 148
150 hash = clip_addr_hash(ctbl, addr, addr_len); 149 hash = clip_addr_hash(ctbl, addr, v6);
151 150
152 read_lock_bh(&ctbl->lock); 151 read_lock_bh(&ctbl->lock);
153 list_for_each_entry(cte, &ctbl->hash_list[hash], list) { 152 list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
154 if (addr_len == cte->addr_len && 153 if (cte->addr6.sin6_family == AF_INET6 && v6)
155 memcmp(lip, cte->addr, cte->addr_len) == 0) { 154 ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
155 sizeof(struct in6_addr));
156 else if (cte->addr.sin_family == AF_INET && !v6)
157 ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
158 sizeof(struct in_addr));
159 if (!ret) {
156 ce = cte; 160 ce = cte;
157 read_unlock_bh(&ctbl->lock); 161 read_unlock_bh(&ctbl->lock);
158 goto found; 162 goto found;
@@ -249,10 +253,7 @@ int clip_tbl_show(struct seq_file *seq, void *v)
249 for (i = 0 ; i < ctbl->clipt_size; ++i) { 253 for (i = 0 ; i < ctbl->clipt_size; ++i) {
250 list_for_each_entry(ce, &ctbl->hash_list[i], list) { 254 list_for_each_entry(ce, &ctbl->hash_list[i], list) {
251 ip[0] = '\0'; 255 ip[0] = '\0';
252 if (ce->addr_len == 16) 256 sprintf(ip, "%pISc", &ce->addr);
253 sprintf(ip, "%pI6c", ce->addr);
254 else
255 sprintf(ip, "%pI4c", ce->addr);
256 seq_printf(seq, "%-25s %u\n", ip, 257 seq_printf(seq, "%-25s %u\n", ip,
257 atomic_read(&ce->refcnt)); 258 atomic_read(&ce->refcnt));
258 } 259 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
index 2eaba0161cf8..35eb43c6bcbb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -14,8 +14,10 @@ struct clip_entry {
14 spinlock_t lock; /* Hold while modifying clip reference */ 14 spinlock_t lock; /* Hold while modifying clip reference */
15 atomic_t refcnt; 15 atomic_t refcnt;
16 struct list_head list; 16 struct list_head list;
17 u32 addr[4]; 17 union {
18 int addr_len; 18 struct sockaddr_in addr;
19 struct sockaddr_in6 addr6;
20 };
19}; 21};
20 22
21struct clip_tbl { 23struct clip_tbl {
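
The clip_tbl rework above drops the raw addr[4]/addr_len pair in favour of an anonymous union of sockaddr_in/sockaddr_in6 tagged by address family, so lookups compare only against entries of the matching family and the seq_file path can print either family with a single %pISc. A small standalone sketch of that family-tagged match, with illustrative names:

	/* Standalone model of the family-tagged lookup the clip table now
	 * uses: entries store either an IPv4 or an IPv6 sockaddr in a union,
	 * and a lookup matches only entries whose family agrees. */
	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	struct entry {
		union {
			struct sockaddr_in  addr;   /* AF_INET */
			struct sockaddr_in6 addr6;  /* AF_INET6 */
		};
	};

	static bool entry_matches(const struct entry *e, const void *ip, bool v6)
	{
		if (v6 && e->addr6.sin6_family == AF_INET6)
			return !memcmp(ip, &e->addr6.sin6_addr, sizeof(struct in6_addr));
		if (!v6 && e->addr.sin_family == AF_INET)
			return !memcmp(ip, &e->addr.sin_addr, sizeof(struct in_addr));
		return false;  /* a family mismatch can never match */
	}

	int main(void)
	{
		struct entry e = { .addr = { .sin_family = AF_INET } };
		inet_pton(AF_INET, "10.0.0.1", &e.addr.sin_addr);

		struct in_addr q;
		inet_pton(AF_INET, "10.0.0.1", &q);
		printf("match=%d\n", entry_matches(&e, &q, false));
		return 0;
	}
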
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index d6cda17efe6e..97842d03675b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1103,7 +1103,7 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
1103#define T4_MEMORY_WRITE 0 1103#define T4_MEMORY_WRITE 0
1104#define T4_MEMORY_READ 1 1104#define T4_MEMORY_READ 1
1105int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len, 1105int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
1106 __be32 *buf, int dir); 1106 void *buf, int dir);
1107static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr, 1107static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
1108 u32 len, __be32 *buf) 1108 u32 len, __be32 *buf)
1109{ 1109{
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4d643b65265e..853c38997c82 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -449,7 +449,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
449 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC 449 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
450 * @addr: address within indicated memory type 450 * @addr: address within indicated memory type
451 * @len: amount of memory to transfer 451 * @len: amount of memory to transfer
452 * @buf: host memory buffer 452 * @hbuf: host memory buffer
453 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0) 453 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
454 * 454 *
455 * Reads/writes an [almost] arbitrary memory region in the firmware: the 455 * Reads/writes an [almost] arbitrary memory region in the firmware: the
@@ -460,15 +460,17 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
460 * caller's responsibility to perform appropriate byte order conversions. 460 * caller's responsibility to perform appropriate byte order conversions.
461 */ 461 */
462int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, 462int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
463 u32 len, __be32 *buf, int dir) 463 u32 len, void *hbuf, int dir)
464{ 464{
465 u32 pos, offset, resid, memoffset; 465 u32 pos, offset, resid, memoffset;
466 u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base; 466 u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
467 u32 *buf;
467 468
468 /* Argument sanity checks ... 469 /* Argument sanity checks ...
469 */ 470 */
470 if (addr & 0x3) 471 if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
471 return -EINVAL; 472 return -EINVAL;
473 buf = (u32 *)hbuf;
472 474
473 /* It's convenient to be able to handle lengths which aren't a 475 /* It's convenient to be able to handle lengths which aren't a
474 * multiple of 32-bits because we often end up transferring files to 476 * multiple of 32-bits because we often end up transferring files to
@@ -532,14 +534,45 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
532 534
533 /* Transfer data to/from the adapter as long as there's an integral 535 /* Transfer data to/from the adapter as long as there's an integral
534 * number of 32-bit transfers to complete. 536 * number of 32-bit transfers to complete.
537 *
538 * A note on Endianness issues:
539 *
540 * The "register" reads and writes below from/to the PCI-E Memory
541 * Window invoke the standard adapter Big-Endian to PCI-E Link
542 * Little-Endian "swizzle." As a result, if we have the following
543 * data in adapter memory:
544 *
545 * Memory: ... | b0 | b1 | b2 | b3 | ...
546 * Address: i+0 i+1 i+2 i+3
547 *
548 * Then a read of the adapter memory via the PCI-E Memory Window
549 * will yield:
550 *
551 * x = readl(i)
552 * 31 0
553 * [ b3 | b2 | b1 | b0 ]
554 *
555 * If this value is stored into local memory on a Little-Endian system
556 * it will show up correctly in local memory as:
557 *
558 * ( ..., b0, b1, b2, b3, ... )
559 *
560 * But on a Big-Endian system, the store will show up in memory
561 * incorrectly swizzled as:
562 *
563 * ( ..., b3, b2, b1, b0, ... )
564 *
565 * So we need to account for this in the reads and writes to the
566 * PCI-E Memory Window below by undoing the register read/write
567 * swizzles.
535 */ 568 */
536 while (len > 0) { 569 while (len > 0) {
537 if (dir == T4_MEMORY_READ) 570 if (dir == T4_MEMORY_READ)
538 *buf++ = (__force __be32) t4_read_reg(adap, 571 *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
539 mem_base + offset); 572 mem_base + offset));
540 else 573 else
541 t4_write_reg(adap, mem_base + offset, 574 t4_write_reg(adap, mem_base + offset,
542 (__force u32) *buf++); 575 (__force u32)cpu_to_le32(*buf++));
543 offset += sizeof(__be32); 576 offset += sizeof(__be32);
544 len -= sizeof(__be32); 577 len -= sizeof(__be32);
545 578
@@ -568,15 +601,16 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
568 */ 601 */
569 if (resid) { 602 if (resid) {
570 union { 603 union {
571 __be32 word; 604 u32 word;
572 char byte[4]; 605 char byte[4];
573 } last; 606 } last;
574 unsigned char *bp; 607 unsigned char *bp;
575 int i; 608 int i;
576 609
577 if (dir == T4_MEMORY_READ) { 610 if (dir == T4_MEMORY_READ) {
578 last.word = (__force __be32) t4_read_reg(adap, 611 last.word = le32_to_cpu(
579 mem_base + offset); 612 (__force __le32)t4_read_reg(adap,
613 mem_base + offset));
580 for (bp = (unsigned char *)buf, i = resid; i < 4; i++) 614 for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
581 bp[i] = last.byte[i]; 615 bp[i] = last.byte[i];
582 } else { 616 } else {
@@ -584,7 +618,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
584 for (i = resid; i < 4; i++) 618 for (i = resid; i < 4; i++)
585 last.byte[i] = 0; 619 last.byte[i] = 0;
586 t4_write_reg(adap, mem_base + offset, 620 t4_write_reg(adap, mem_base + offset,
587 (__force u32) last.word); 621 (__force u32)cpu_to_le32(last.word));
588 } 622 }
589 } 623 }
590 624
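
The long comment added to t4_memory_rw() is the heart of this hunk: the PCI-E memory window byte-swizzles between the adapter's big-endian memory and the link, so the value a register read returns only lands with the right byte order if it is stored to host memory in an explicit little-endian layout; hence the new le32_to_cpu()/cpu_to_le32() wrappers around the window accesses. A standalone sketch of the same reasoning, with a hypothetical helper standing in for cpu_to_le32():

	/* Illustration of the byte-order reasoning in the comment above:
	 * a 32-bit word read through a swizzling window must be stored
	 * back to a byte buffer in a fixed (little-endian) layout, or
	 * big-endian hosts lay the bytes out reversed. */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static uint32_t to_le32(uint32_t v)  /* cpu_to_le32() stand-in */
	{
		uint8_t b[4] = { (uint8_t)v, (uint8_t)(v >> 8),
				 (uint8_t)(v >> 16), (uint8_t)(v >> 24) };
		uint32_t out;
		memcpy(&out, b, 4);  /* byte image is LE regardless of host */
		return out;
	}

	int main(void)
	{
		/* Pretend the window read returned bytes b0..b3 packed as
		 * [b3 b2 b1 b0] in a host integer, as the comment describes. */
		uint32_t reg = 0xd4c3b2a1;   /* b0=a1, b1=b2, b2=c3, b3=d4 */
		uint32_t buf = to_le32(reg); /* fix the in-memory byte order */
		uint8_t *p = (uint8_t *)&buf;
		printf("bytes: %02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
		return 0;                    /* a1 b2 c3 d4 on any host */
	}
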
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index a368c0a96ec7..204bd182473b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -272,8 +272,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
272 } 272 }
273 273
274 if (ENIC_TEST_INTR(pba, notify_intr)) { 274 if (ENIC_TEST_INTR(pba, notify_intr)) {
275 vnic_intr_return_all_credits(&enic->intr[notify_intr]);
276 enic_notify_check(enic); 275 enic_notify_check(enic);
276 vnic_intr_return_all_credits(&enic->intr[notify_intr]);
277 } 277 }
278 278
279 if (ENIC_TEST_INTR(pba, err_intr)) { 279 if (ENIC_TEST_INTR(pba, err_intr)) {
@@ -346,8 +346,8 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
346 struct enic *enic = data; 346 struct enic *enic = data;
347 unsigned int intr = enic_msix_notify_intr(enic); 347 unsigned int intr = enic_msix_notify_intr(enic);
348 348
349 vnic_intr_return_all_credits(&enic->intr[intr]);
350 enic_notify_check(enic); 349 enic_notify_check(enic);
350 vnic_intr_return_all_credits(&enic->intr[intr]);
351 351
352 return IRQ_HANDLED; 352 return IRQ_HANDLED;
353} 353}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 43df78882e48..178e54028d10 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -3162,8 +3162,8 @@ static void adjust_link(struct net_device *dev)
3162 struct phy_device *phydev = priv->phydev; 3162 struct phy_device *phydev = priv->phydev;
3163 3163
3164 if (unlikely(phydev->link != priv->oldlink || 3164 if (unlikely(phydev->link != priv->oldlink ||
3165 phydev->duplex != priv->oldduplex || 3165 (phydev->link && (phydev->duplex != priv->oldduplex ||
3166 phydev->speed != priv->oldspeed)) 3166 phydev->speed != priv->oldspeed))))
3167 gfar_update_link_state(priv); 3167 gfar_update_link_state(priv);
3168} 3168}
3169 3169
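
The gianfar fix above tightens the link-change predicate: speed and duplex are only meaningful while the link is up, so comparing them unconditionally let stale values trigger spurious gfar_update_link_state() calls on a down link. A tiny standalone model of the corrected predicate (illustrative types, not the driver's):

	#include <stdbool.h>
	#include <stdio.h>

	struct phy_state { bool link; int speed, duplex; };

	/* Speed/duplex deltas only count as a change while the link is up;
	 * a down link is compared on the link bit alone. */
	static bool state_changed(const struct phy_state *now,
				  const struct phy_state *old)
	{
		return now->link != old->link ||
		       (now->link && (now->speed != old->speed ||
				      now->duplex != old->duplex));
	}

	int main(void)
	{
		struct phy_state old = { .link = false, .speed = 1000, .duplex = 1 };
		struct phy_state now = { .link = false, .speed = 0,    .duplex = 0 };
		/* Link stayed down: stale speed/duplex must not report a change. */
		printf("changed=%d\n", state_changed(&now, &old)); /* 0 */
		return 0;
	}
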
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index e8a1adb7a962..c05e50759621 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3262,6 +3262,139 @@ static void ehea_remove_device_sysfs(struct platform_device *dev)
3262 device_remove_file(&dev->dev, &dev_attr_remove_port); 3262 device_remove_file(&dev->dev, &dev_attr_remove_port);
3263} 3263}
3264 3264
3265static int ehea_reboot_notifier(struct notifier_block *nb,
3266 unsigned long action, void *unused)
3267{
3268 if (action == SYS_RESTART) {
3269 pr_info("Reboot: freeing all eHEA resources\n");
3270 ibmebus_unregister_driver(&ehea_driver);
3271 }
3272 return NOTIFY_DONE;
3273}
3274
3275static struct notifier_block ehea_reboot_nb = {
3276 .notifier_call = ehea_reboot_notifier,
3277};
3278
3279static int ehea_mem_notifier(struct notifier_block *nb,
3280 unsigned long action, void *data)
3281{
3282 int ret = NOTIFY_BAD;
3283 struct memory_notify *arg = data;
3284
3285 mutex_lock(&dlpar_mem_lock);
3286
3287 switch (action) {
3288 case MEM_CANCEL_OFFLINE:
3289 pr_info("memory offlining canceled");
3290 /* Fall through: re-add canceled memory block */
3291
3292 case MEM_ONLINE:
3293 pr_info("memory is going online");
3294 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3295 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3296 goto out_unlock;
3297 ehea_rereg_mrs();
3298 break;
3299
3300 case MEM_GOING_OFFLINE:
3301 pr_info("memory is going offline");
3302 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3303 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3304 goto out_unlock;
3305 ehea_rereg_mrs();
3306 break;
3307
3308 default:
3309 break;
3310 }
3311
3312 ehea_update_firmware_handles();
3313 ret = NOTIFY_OK;
3314
3315out_unlock:
3316 mutex_unlock(&dlpar_mem_lock);
3317 return ret;
3318}
3319
3320static struct notifier_block ehea_mem_nb = {
3321 .notifier_call = ehea_mem_notifier,
3322};
3323
3324static void ehea_crash_handler(void)
3325{
3326 int i;
3327
3328 if (ehea_fw_handles.arr)
3329 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3330 ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3331 ehea_fw_handles.arr[i].fwh,
3332 FORCE_FREE);
3333
3334 if (ehea_bcmc_regs.arr)
3335 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3336 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3337 ehea_bcmc_regs.arr[i].port_id,
3338 ehea_bcmc_regs.arr[i].reg_type,
3339 ehea_bcmc_regs.arr[i].macaddr,
3340 0, H_DEREG_BCMC);
3341}
3342
3343static atomic_t ehea_memory_hooks_registered;
3344
3345/* Register memory hooks on probe of first adapter */
3346static int ehea_register_memory_hooks(void)
3347{
3348 int ret = 0;
3349
3350 if (atomic_inc_and_test(&ehea_memory_hooks_registered))
3351 return 0;
3352
3353 ret = ehea_create_busmap();
3354 if (ret) {
3355 pr_info("ehea_create_busmap failed\n");
3356 goto out;
3357 }
3358
3359 ret = register_reboot_notifier(&ehea_reboot_nb);
3360 if (ret) {
3361 pr_info("register_reboot_notifier failed\n");
3362 goto out;
3363 }
3364
3365 ret = register_memory_notifier(&ehea_mem_nb);
3366 if (ret) {
3367 pr_info("register_memory_notifier failed\n");
3368 goto out2;
3369 }
3370
3371 ret = crash_shutdown_register(ehea_crash_handler);
3372 if (ret) {
3373 pr_info("crash_shutdown_register failed\n");
3374 goto out3;
3375 }
3376
3377 return 0;
3378
3379out3:
3380 unregister_memory_notifier(&ehea_mem_nb);
3381out2:
3382 unregister_reboot_notifier(&ehea_reboot_nb);
3383out:
3384 return ret;
3385}
3386
3387static void ehea_unregister_memory_hooks(void)
3388{
3389 if (atomic_read(&ehea_memory_hooks_registered))
3390 return;
3391
3392 unregister_reboot_notifier(&ehea_reboot_nb);
3393 if (crash_shutdown_unregister(ehea_crash_handler))
3394 pr_info("failed unregistering crash handler\n");
3395 unregister_memory_notifier(&ehea_mem_nb);
3396}
3397
3265static int ehea_probe_adapter(struct platform_device *dev) 3398static int ehea_probe_adapter(struct platform_device *dev)
3266{ 3399{
3267 struct ehea_adapter *adapter; 3400 struct ehea_adapter *adapter;
@@ -3269,6 +3402,10 @@ static int ehea_probe_adapter(struct platform_device *dev)
3269 int ret; 3402 int ret;
3270 int i; 3403 int i;
3271 3404
3405 ret = ehea_register_memory_hooks();
3406 if (ret)
3407 return ret;
3408
3272 if (!dev || !dev->dev.of_node) { 3409 if (!dev || !dev->dev.of_node) {
3273 pr_err("Invalid ibmebus device probed\n"); 3410 pr_err("Invalid ibmebus device probed\n");
3274 return -EINVAL; 3411 return -EINVAL;
@@ -3392,81 +3529,6 @@ static int ehea_remove(struct platform_device *dev)
3392 return 0; 3529 return 0;
3393} 3530}
3394 3531
3395static void ehea_crash_handler(void)
3396{
3397 int i;
3398
3399 if (ehea_fw_handles.arr)
3400 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3401 ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3402 ehea_fw_handles.arr[i].fwh,
3403 FORCE_FREE);
3404
3405 if (ehea_bcmc_regs.arr)
3406 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3407 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3408 ehea_bcmc_regs.arr[i].port_id,
3409 ehea_bcmc_regs.arr[i].reg_type,
3410 ehea_bcmc_regs.arr[i].macaddr,
3411 0, H_DEREG_BCMC);
3412}
3413
3414static int ehea_mem_notifier(struct notifier_block *nb,
3415 unsigned long action, void *data)
3416{
3417 int ret = NOTIFY_BAD;
3418 struct memory_notify *arg = data;
3419
3420 mutex_lock(&dlpar_mem_lock);
3421
3422 switch (action) {
3423 case MEM_CANCEL_OFFLINE:
3424 pr_info("memory offlining canceled");
3425 /* Readd canceled memory block */
3426 case MEM_ONLINE:
3427 pr_info("memory is going online");
3428 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3429 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3430 goto out_unlock;
3431 ehea_rereg_mrs();
3432 break;
3433 case MEM_GOING_OFFLINE:
3434 pr_info("memory is going offline");
3435 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3436 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3437 goto out_unlock;
3438 ehea_rereg_mrs();
3439 break;
3440 default:
3441 break;
3442 }
3443
3444 ehea_update_firmware_handles();
3445 ret = NOTIFY_OK;
3446
3447out_unlock:
3448 mutex_unlock(&dlpar_mem_lock);
3449 return ret;
3450}
3451
3452static struct notifier_block ehea_mem_nb = {
3453 .notifier_call = ehea_mem_notifier,
3454};
3455
3456static int ehea_reboot_notifier(struct notifier_block *nb,
3457 unsigned long action, void *unused)
3458{
3459 if (action == SYS_RESTART) {
3460 pr_info("Reboot: freeing all eHEA resources\n");
3461 ibmebus_unregister_driver(&ehea_driver);
3462 }
3463 return NOTIFY_DONE;
3464}
3465
3466static struct notifier_block ehea_reboot_nb = {
3467 .notifier_call = ehea_reboot_notifier,
3468};
3469
3470static int check_module_parm(void) 3532static int check_module_parm(void)
3471{ 3533{
3472 int ret = 0; 3534 int ret = 0;
@@ -3520,26 +3582,10 @@ static int __init ehea_module_init(void)
3520 if (ret) 3582 if (ret)
3521 goto out; 3583 goto out;
3522 3584
3523 ret = ehea_create_busmap();
3524 if (ret)
3525 goto out;
3526
3527 ret = register_reboot_notifier(&ehea_reboot_nb);
3528 if (ret)
3529 pr_info("failed registering reboot notifier\n");
3530
3531 ret = register_memory_notifier(&ehea_mem_nb);
3532 if (ret)
3533 pr_info("failed registering memory remove notifier\n");
3534
3535 ret = crash_shutdown_register(ehea_crash_handler);
3536 if (ret)
3537 pr_info("failed registering crash handler\n");
3538
3539 ret = ibmebus_register_driver(&ehea_driver); 3585 ret = ibmebus_register_driver(&ehea_driver);
3540 if (ret) { 3586 if (ret) {
3541 pr_err("failed registering eHEA device driver on ebus\n"); 3587 pr_err("failed registering eHEA device driver on ebus\n");
3542 goto out2; 3588 goto out;
3543 } 3589 }
3544 3590
3545 ret = driver_create_file(&ehea_driver.driver, 3591 ret = driver_create_file(&ehea_driver.driver,
@@ -3547,32 +3593,22 @@ static int __init ehea_module_init(void)
3547 if (ret) { 3593 if (ret) {
3548 pr_err("failed to register capabilities attribute, ret=%d\n", 3594 pr_err("failed to register capabilities attribute, ret=%d\n",
3549 ret); 3595 ret);
3550 goto out3; 3596 goto out2;
3551 } 3597 }
3552 3598
3553 return ret; 3599 return ret;
3554 3600
3555out3:
3556 ibmebus_unregister_driver(&ehea_driver);
3557out2: 3601out2:
3558 unregister_memory_notifier(&ehea_mem_nb); 3602 ibmebus_unregister_driver(&ehea_driver);
3559 unregister_reboot_notifier(&ehea_reboot_nb);
3560 crash_shutdown_unregister(ehea_crash_handler);
3561out: 3603out:
3562 return ret; 3604 return ret;
3563} 3605}
3564 3606
3565static void __exit ehea_module_exit(void) 3607static void __exit ehea_module_exit(void)
3566{ 3608{
3567 int ret;
3568
3569 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); 3609 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3570 ibmebus_unregister_driver(&ehea_driver); 3610 ibmebus_unregister_driver(&ehea_driver);
3571 unregister_reboot_notifier(&ehea_reboot_nb); 3611 ehea_unregister_memory_hooks();
3572 ret = crash_shutdown_unregister(ehea_crash_handler);
3573 if (ret)
3574 pr_info("failed unregistering crash handler\n");
3575 unregister_memory_notifier(&ehea_mem_nb);
3576 kfree(ehea_fw_handles.arr); 3612 kfree(ehea_fw_handles.arr);
3577 kfree(ehea_bcmc_regs.arr); 3613 kfree(ehea_bcmc_regs.arr);
3578 ehea_destroy_busmap(); 3614 ehea_destroy_busmap();
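
The ehea restructuring above moves the reboot, memory and crash hooks from module init to the first adapter probe, guarded by an atomic counter, with error paths that unwind in reverse registration order. The sketch below models the intended first-caller-only semantics with C11 atomics and stubbed hooks; it illustrates the shape only, not the driver's exact guard (which tests atomic_inc_and_test()).

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int hooks_registered;

	static int register_reboot(void) { return 0; }
	static int register_memory(void) { return 0; }
	static int register_crash(void)  { return -1; } /* force the error path */
	static void unregister_memory(void) { puts("memory hook rolled back"); }
	static void unregister_reboot(void) { puts("reboot hook rolled back"); }

	static int register_hooks_once(void)
	{
		int ret;

		/* Only the first caller sees 0 from fetch_add and registers. */
		if (atomic_fetch_add(&hooks_registered, 1) != 0)
			return 0;

		ret = register_reboot();
		if (ret)
			goto out;
		ret = register_memory();
		if (ret)
			goto out_reboot;
		ret = register_crash();
		if (ret)
			goto out_memory;
		return 0;

	out_memory:
		unregister_memory();
	out_reboot:
		unregister_reboot();
	out:
		return ret;
	}

	int main(void)
	{
		printf("first probe: %d\n", register_hooks_once());
		printf("second probe: %d\n", register_hooks_once());
		return 0;
	}
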
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 21978cc019e7..072426a72745 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1327,6 +1327,28 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1327 return ret; 1327 return ret;
1328} 1328}
1329 1329
1330static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
1331{
1332 struct ibmveth_adapter *adapter = netdev_priv(dev);
1333 struct sockaddr *addr = p;
1334 u64 mac_address;
1335 int rc;
1336
1337 if (!is_valid_ether_addr(addr->sa_data))
1338 return -EADDRNOTAVAIL;
1339
1340 mac_address = ibmveth_encode_mac_addr(addr->sa_data);
1341 rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
1342 if (rc) {
1343 netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
1344 return rc;
1345 }
1346
1347 ether_addr_copy(dev->dev_addr, addr->sa_data);
1348
1349 return 0;
1350}
1351
1330static const struct net_device_ops ibmveth_netdev_ops = { 1352static const struct net_device_ops ibmveth_netdev_ops = {
1331 .ndo_open = ibmveth_open, 1353 .ndo_open = ibmveth_open,
1332 .ndo_stop = ibmveth_close, 1354 .ndo_stop = ibmveth_close,
@@ -1337,7 +1359,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
1337 .ndo_fix_features = ibmveth_fix_features, 1359 .ndo_fix_features = ibmveth_fix_features,
1338 .ndo_set_features = ibmveth_set_features, 1360 .ndo_set_features = ibmveth_set_features,
1339 .ndo_validate_addr = eth_validate_addr, 1361 .ndo_validate_addr = eth_validate_addr,
1340 .ndo_set_mac_address = eth_mac_addr, 1362 .ndo_set_mac_address = ibmveth_set_mac_addr,
1341#ifdef CONFIG_NET_POLL_CONTROLLER 1363#ifdef CONFIG_NET_POLL_CONTROLLER
1342 .ndo_poll_controller = ibmveth_poll_controller, 1364 .ndo_poll_controller = ibmveth_poll_controller,
1343#endif 1365#endif
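
The new ibmveth_set_mac_addr() follows the usual ndo_set_mac_address ordering: validate the address, program the hardware (here via the h_change_logical_lan_mac hypervisor call), and copy into dev->dev_addr only once that succeeds, so a failed hcall leaves the old address intact. A standalone model of that ordering with stubbed validation and hardware calls; note the real is_valid_ether_addr() also rejects all-zero addresses:

	#include <stdio.h>
	#include <string.h>

	static int hw_set_mac(const unsigned char *mac) /* hcall stand-in */
	{
		return mac[0] & 1 ? -1 : 0;  /* reject multicast, like a NIC */
	}

	static int set_mac(unsigned char dev_addr[6], const unsigned char *new_mac)
	{
		if (new_mac[0] & 1)           /* validate first */
			return -1;
		if (hw_set_mac(new_mac))      /* program hardware next ... */
			return -1;
		memcpy(dev_addr, new_mac, 6); /* ... commit only on success */
		return 0;
	}

	int main(void)
	{
		unsigned char dev_addr[6] = { 0x02, 0, 0, 0, 0, 1 };
		unsigned char next[6]     = { 0x02, 0, 0, 0, 0, 2 };
		printf("rc=%d addr ends %02x\n",
		       set_mac(dev_addr, next), dev_addr[5]);
		return 0;
	}
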
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index cb19c377e0cc..1da7d05abd38 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -875,8 +875,9 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
875 * The grst delay value is in 100ms units, and we'll wait a 875 * The grst delay value is in 100ms units, and we'll wait a
876 * couple counts longer to be sure we don't just miss the end. 876 * couple counts longer to be sure we don't just miss the end.
877 */ 877 */
878 grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK 878 grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
879 >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; 879 I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
880 I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
880 for (cnt = 0; cnt < grst_del + 2; cnt++) { 881 for (cnt = 0; cnt < grst_del + 2; cnt++) {
881 reg = rd32(hw, I40E_GLGEN_RSTAT); 882 reg = rd32(hw, I40E_GLGEN_RSTAT);
882 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) 883 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
@@ -2828,7 +2829,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
2828 2829
2829 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2830 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2830 2831
2831 if (!status) 2832 if (!status && filter_index)
2832 *filter_index = resp->index; 2833 *filter_index = resp->index;
2833 2834
2834 return status; 2835 return status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index b0665509eae6..2f583554a260 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -40,7 +40,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
40 u32 val; 40 u32 val;
41 41
42 val = rd32(hw, I40E_PRTDCB_GENC); 42 val = rd32(hw, I40E_PRTDCB_GENC);
43 *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >> 43 *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
44 I40E_PRTDCB_GENC_PFCLDA_SHIFT); 44 I40E_PRTDCB_GENC_PFCLDA_SHIFT);
45} 45}
46 46
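
Both i40e hunks above fix the same C precedence bug: >> binds tighter than &, so `val & MASK >> SHIFT` parses as `val & (MASK >> SHIFT)` and masks with a shifted mask instead of extracting the field. A compilable demonstration with made-up mask values:

	#include <stdio.h>

	#define MASK  0x0000ff00u
	#define SHIFT 8

	int main(void)
	{
		unsigned val = 0x1234abcdu;
		unsigned buggy = val & MASK >> SHIFT;    /* val & (MASK >> SHIFT) */
		unsigned fixed = (val & MASK) >> SHIFT;  /* extract the field */
		printf("buggy=0x%x fixed=0x%x\n", buggy, fixed);
		return 0;                                /* buggy=0xcd fixed=0xab */
	}
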
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 30cf0be7d1b2..e802b6bc067d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -990,8 +990,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
990 if (!cmd_buf) 990 if (!cmd_buf)
991 return count; 991 return count;
992 bytes_not_copied = copy_from_user(cmd_buf, buffer, count); 992 bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
993 if (bytes_not_copied < 0) 993 if (bytes_not_copied < 0) {
994 kfree(cmd_buf);
994 return bytes_not_copied; 995 return bytes_not_copied;
996 }
995 if (bytes_not_copied > 0) 997 if (bytes_not_copied > 0)
996 count -= bytes_not_copied; 998 count -= bytes_not_copied;
997 cmd_buf[count] = '\0'; 999 cmd_buf[count] = '\0';
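
The debugfs hunk is a classic early-return leak fix: once cmd_buf is allocated, the copy_from_user() failure path must free it before bailing. A minimal standalone model of the corrected shape (stubbed copy, illustrative names):

	#include <stdlib.h>
	#include <string.h>
	#include <sys/types.h>

	static ssize_t handle_write(const char *user, size_t count)
	{
		char *buf = malloc(count + 1);
		if (!buf)
			return -1;
		if (!user) {       /* models the copy_from_user() failure */
			free(buf); /* the fix: don't leak on the error path */
			return -1;
		}
		memcpy(buf, user, count);
		buf[count] = '\0';
		/* ... parse command ... */
		free(buf);
		return (ssize_t)count;
	}

	int main(void)
	{
		return handle_write("lldp stop", 9) == 9 ? 0 : 1;
	}
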
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index c3858e7f0e66..56bdaff9f27e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1507,7 +1507,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1507 vsi->tc_config.numtc = numtc; 1507 vsi->tc_config.numtc = numtc;
1508 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; 1508 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1509 /* Number of queues per enabled TC */ 1509 /* Number of queues per enabled TC */
1510 num_tc_qps = vsi->alloc_queue_pairs/numtc; 1510 /* In MFP case we can have a much lower count of MSIx
1511 * vectors available and so we need to lower the used
1512 * q count.
1513 */
1514 qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
1515 num_tc_qps = qcount / numtc;
1511 num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); 1516 num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
1512 1517
1513 /* Setup queue offset/count for all TCs for given VSI */ 1518 /* Setup queue offset/count for all TCs for given VSI */
@@ -2690,8 +2695,15 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2690 u16 qoffset, qcount; 2695 u16 qoffset, qcount;
2691 int i, n; 2696 int i, n;
2692 2697
2693 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) 2698 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2694 return; 2699 /* Reset the TC information */
2700 for (i = 0; i < vsi->num_queue_pairs; i++) {
2701 rx_ring = vsi->rx_rings[i];
2702 tx_ring = vsi->tx_rings[i];
2703 rx_ring->dcb_tc = 0;
2704 tx_ring->dcb_tc = 0;
2705 }
2706 }
2695 2707
2696 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { 2708 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2697 if (!(vsi->tc_config.enabled_tc & (1 << n))) 2709 if (!(vsi->tc_config.enabled_tc & (1 << n)))
@@ -3836,6 +3848,12 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3836{ 3848{
3837 int i; 3849 int i;
3838 3850
3851 i40e_stop_misc_vector(pf);
3852 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3853 synchronize_irq(pf->msix_entries[0].vector);
3854 free_irq(pf->msix_entries[0].vector, pf);
3855 }
3856
3839 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); 3857 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3840 for (i = 0; i < pf->num_alloc_vsi; i++) 3858 for (i = 0; i < pf->num_alloc_vsi; i++)
3841 if (pf->vsi[i]) 3859 if (pf->vsi[i])
@@ -5246,8 +5264,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
5246 5264
5247 /* Wait for the PF's Tx queues to be disabled */ 5265 /* Wait for the PF's Tx queues to be disabled */
5248 ret = i40e_pf_wait_txq_disabled(pf); 5266 ret = i40e_pf_wait_txq_disabled(pf);
5249 if (!ret) 5267 if (ret) {
5268 /* Schedule PF reset to recover */
5269 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5270 i40e_service_event_schedule(pf);
5271 } else {
5250 i40e_pf_unquiesce_all_vsi(pf); 5272 i40e_pf_unquiesce_all_vsi(pf);
5273 }
5274
5251exit: 5275exit:
5252 return ret; 5276 return ret;
5253} 5277}
@@ -5579,7 +5603,8 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
5579 int i, v; 5603 int i, v;
5580 5604
5581 /* If we're down or resetting, just bail */ 5605 /* If we're down or resetting, just bail */
5582 if (test_bit(__I40E_CONFIG_BUSY, &pf->state)) 5606 if (test_bit(__I40E_DOWN, &pf->state) ||
5607 test_bit(__I40E_CONFIG_BUSY, &pf->state))
5583 return; 5608 return;
5584 5609
5585 /* for each VSI/netdev 5610 /* for each VSI/netdev
@@ -9849,6 +9874,7 @@ static void i40e_remove(struct pci_dev *pdev)
9849 set_bit(__I40E_DOWN, &pf->state); 9874 set_bit(__I40E_DOWN, &pf->state);
9850 del_timer_sync(&pf->service_timer); 9875 del_timer_sync(&pf->service_timer);
9851 cancel_work_sync(&pf->service_task); 9876 cancel_work_sync(&pf->service_task);
9877 i40e_fdir_teardown(pf);
9852 9878
9853 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { 9879 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
9854 i40e_free_vfs(pf); 9880 i40e_free_vfs(pf);
@@ -9875,12 +9901,6 @@ static void i40e_remove(struct pci_dev *pdev)
9875 if (pf->vsi[pf->lan_vsi]) 9901 if (pf->vsi[pf->lan_vsi])
9876 i40e_vsi_release(pf->vsi[pf->lan_vsi]); 9902 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
9877 9903
9878 i40e_stop_misc_vector(pf);
9879 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
9880 synchronize_irq(pf->msix_entries[0].vector);
9881 free_irq(pf->msix_entries[0].vector, pf);
9882 }
9883
9884 /* shutdown and destroy the HMC */ 9904 /* shutdown and destroy the HMC */
9885 if (pf->hw.hmc.hmc_obj) { 9905 if (pf->hw.hmc.hmc_obj) {
9886 ret_code = i40e_shutdown_lan_hmc(&pf->hw); 9906 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
@@ -10034,6 +10054,8 @@ static void i40e_shutdown(struct pci_dev *pdev)
10034 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); 10054 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
10035 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); 10055 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
10036 10056
10057 i40e_clear_interrupt_scheme(pf);
10058
10037 if (system_state == SYSTEM_POWER_OFF) { 10059 if (system_state == SYSTEM_POWER_OFF) {
10038 pci_wake_from_d3(pdev, pf->wol_en); 10060 pci_wake_from_d3(pdev, pf->wol_en);
10039 pci_set_power_state(pdev, PCI_D3hot); 10061 pci_set_power_state(pdev, PCI_D3hot);
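
Among the i40e_main changes, the queue-map fix caps the usable queue count at the number of LAN MSI-X vectors before dividing among traffic classes, since an MFP partition can have far fewer vectors than allocated queue pairs. The arithmetic, in a standalone sketch with made-up numbers:

	/* Models the clamp added in i40e_vsi_setup_queue_map(): queues per
	 * TC come from min(allocated pairs, LAN MSI-X vectors), so a small
	 * MFP vector budget shrinks the map instead of oversubscribing. */
	#include <stdio.h>

	static int min_int(int a, int b) { return a < b ? a : b; }

	int main(void)
	{
		int alloc_queue_pairs = 64;
		int num_lan_msix = 16;   /* e.g. an MFP partition's budget */
		int numtc = 4;

		int qcount = min_int(alloc_queue_pairs, num_lan_msix);
		int num_tc_qps = qcount / numtc;
		printf("queues per TC: %d\n", num_tc_qps); /* 4, not 16 */
		return 0;
	}
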
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 28429c8fbc98..039018abad4a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -725,9 +725,11 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
725{ 725{
726 i40e_status status; 726 i40e_status status;
727 enum i40e_nvmupd_cmd upd_cmd; 727 enum i40e_nvmupd_cmd upd_cmd;
728 bool retry_attempt = false;
728 729
729 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); 730 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
730 731
732retry:
731 switch (upd_cmd) { 733 switch (upd_cmd) {
732 case I40E_NVMUPD_WRITE_CON: 734 case I40E_NVMUPD_WRITE_CON:
733 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); 735 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
@@ -771,6 +773,39 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
771 *errno = -ESRCH; 773 *errno = -ESRCH;
772 break; 774 break;
773 } 775 }
776
777 /* In some circumstances, a multi-write transaction takes longer
778 * than the default 3 minute timeout on the write semaphore. If
779 * the write failed with an EBUSY status, this is likely the problem,
780 * so here we try to reacquire the semaphore then retry the write.
781 * We only do one retry, then give up.
782 */
783 if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
784 !retry_attempt) {
785 i40e_status old_status = status;
786 u32 old_asq_status = hw->aq.asq_last_status;
787 u32 gtime;
788
789 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
790 if (gtime >= hw->nvm.hw_semaphore_timeout) {
791 i40e_debug(hw, I40E_DEBUG_ALL,
792 "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
793 gtime, hw->nvm.hw_semaphore_timeout);
794 i40e_release_nvm(hw);
795 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
796 if (status) {
797 i40e_debug(hw, I40E_DEBUG_ALL,
798 "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
799 hw->aq.asq_last_status);
800 status = old_status;
801 hw->aq.asq_last_status = old_asq_status;
802 } else {
803 retry_attempt = true;
804 goto retry;
805 }
806 }
807 }
808
774 return status; 809 return status;
775} 810}
776 811
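
The NVM update change retries a failed multi-write exactly once: if the write reports EBUSY after the write-semaphore timeout has expired, the code releases and re-acquires the semaphore and retries, restoring the original error when re-acquisition itself fails. A standalone model of that retry-once shape (stubbed lock and write, illustrative error code):

	#include <stdbool.h>
	#include <stdio.h>

	static int attempts;

	static int do_write(void) { return ++attempts == 1 ? -16 /*EBUSY*/ : 0; }
	static int reacquire_lock(void) { return 0; }

	static int write_with_retry(void)
	{
		bool retried = false;
		int status;

	retry:
		status = do_write();
		if (status == -16 && !retried) {
			int old_status = status;
			status = reacquire_lock();
			if (status) {
				status = old_status; /* keep the original failure */
			} else {
				retried = true;
				goto retry;          /* exactly one more attempt */
			}
		}
		return status;
	}

	int main(void)
	{
		printf("status=%d after %d attempts\n", write_with_retry(), attempts);
		return 0;
	}
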
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index af350626843e..d4b4aa7c204e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -587,6 +587,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
587} 587}
588 588
589/** 589/**
590 * i40e_get_head - Retrieve head from head writeback
591 * @tx_ring: tx ring to fetch head of
592 *
593 * Returns value of Tx ring head based on value stored
594 * in head write-back location
595 **/
596static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
597{
598 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
599
600 return le32_to_cpu(*(volatile __le32 *)head);
601}
602
603/**
590 * i40e_get_tx_pending - how many tx descriptors not processed 604 * i40e_get_tx_pending - how many tx descriptors not processed
591 * @tx_ring: the ring of descriptors 605 * @tx_ring: the ring of descriptors
592 * 606 *
@@ -595,10 +609,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
595 **/ 609 **/
596static u32 i40e_get_tx_pending(struct i40e_ring *ring) 610static u32 i40e_get_tx_pending(struct i40e_ring *ring)
597{ 611{
598 u32 ntu = ((ring->next_to_clean <= ring->next_to_use) 612 u32 head, tail;
599 ? ring->next_to_use 613
600 : ring->next_to_use + ring->count); 614 head = i40e_get_head(ring);
601 return ntu - ring->next_to_clean; 615 tail = readl(ring->tail);
616
617 if (head != tail)
618 return (head < tail) ?
619 tail - head : (tail + ring->count - head);
620
621 return 0;
602} 622}
603 623
604/** 624/**
@@ -607,6 +627,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
607 **/ 627 **/
608static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) 628static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
609{ 629{
630 u32 tx_done = tx_ring->stats.packets;
631 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
610 u32 tx_pending = i40e_get_tx_pending(tx_ring); 632 u32 tx_pending = i40e_get_tx_pending(tx_ring);
611 struct i40e_pf *pf = tx_ring->vsi->back; 633 struct i40e_pf *pf = tx_ring->vsi->back;
612 bool ret = false; 634 bool ret = false;
@@ -624,41 +646,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
624 * run the check_tx_hang logic with a transmit completion 646 * run the check_tx_hang logic with a transmit completion
625 * pending but without time to complete it yet. 647 * pending but without time to complete it yet.
626 */ 648 */
627 if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && 649 if ((tx_done_old == tx_done) && tx_pending) {
628 (tx_pending >= I40E_MIN_DESC_PENDING)) {
629 /* make sure it is true for two checks in a row */ 650 /* make sure it is true for two checks in a row */
630 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, 651 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
631 &tx_ring->state); 652 &tx_ring->state);
632 } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && 653 } else if (tx_done_old == tx_done &&
633 (tx_pending < I40E_MIN_DESC_PENDING) && 654 (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
634 (tx_pending > 0)) {
635 if (I40E_DEBUG_FLOW & pf->hw.debug_mask) 655 if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
636 dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d", 656 dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
637 tx_pending, tx_ring->queue_index); 657 tx_pending, tx_ring->queue_index);
638 pf->tx_sluggish_count++; 658 pf->tx_sluggish_count++;
639 } else { 659 } else {
640 /* update completed stats and disarm the hang check */ 660 /* update completed stats and disarm the hang check */
641 tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; 661 tx_ring->tx_stats.tx_done_old = tx_done;
642 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); 662 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
643 } 663 }
644 664
645 return ret; 665 return ret;
646} 666}
647 667
648/**
649 * i40e_get_head - Retrieve head from head writeback
650 * @tx_ring: tx ring to fetch head of
651 *
652 * Returns value of Tx ring head based on value stored
653 * in head write-back location
654 **/
655static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
656{
657 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
658
659 return le32_to_cpu(*(volatile __le32 *)head);
660}
661
662#define WB_STRIDE 0x3 668#define WB_STRIDE 0x3
663 669
664/** 670/**
@@ -2356,6 +2362,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2356} 2362}
2357 2363
2358/** 2364/**
2365 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
2366 * @skb: send buffer
2367 * @tx_flags: collected send information
2368 * @hdr_len: size of the packet header
2369 *
2370 * Note: Our HW can't scatter-gather more than 8 fragments to build
2371 * a packet on the wire and so we need to figure out the cases where we
2372 * need to linearize the skb.
2373 **/
2374static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
2375 const u8 hdr_len)
2376{
2377 struct skb_frag_struct *frag;
2378 bool linearize = false;
2379 unsigned int size = 0;
2380 u16 num_frags;
2381 u16 gso_segs;
2382
2383 num_frags = skb_shinfo(skb)->nr_frags;
2384 gso_segs = skb_shinfo(skb)->gso_segs;
2385
2386 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
2387 u16 j = 1;
2388
2389 if (num_frags < (I40E_MAX_BUFFER_TXD))
2390 goto linearize_chk_done;
2391 /* try the simple math, if we have too many frags per segment */
2392 if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
2393 I40E_MAX_BUFFER_TXD) {
2394 linearize = true;
2395 goto linearize_chk_done;
2396 }
2397 frag = &skb_shinfo(skb)->frags[0];
2398 size = hdr_len;
2399 /* we might still have more fragments per segment */
2400 do {
2401 size += skb_frag_size(frag);
2402 frag++; j++;
2403 if (j == I40E_MAX_BUFFER_TXD) {
2404 if (size < skb_shinfo(skb)->gso_size) {
2405 linearize = true;
2406 break;
2407 }
2408 j = 1;
2409 size -= skb_shinfo(skb)->gso_size;
2410 if (size)
2411 j++;
2412 size += hdr_len;
2413 }
2414 num_frags--;
2415 } while (num_frags);
2416 } else {
2417 if (num_frags >= I40E_MAX_BUFFER_TXD)
2418 linearize = true;
2419 }
2420
2421linearize_chk_done:
2422 return linearize;
2423}
2424
2425/**
2359 * i40e_tx_map - Build the Tx descriptor 2426 * i40e_tx_map - Build the Tx descriptor
2360 * @tx_ring: ring to send buffer on 2427 * @tx_ring: ring to send buffer on
2361 * @skb: send buffer 2428 * @skb: send buffer
@@ -2612,6 +2679,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2612 if (tsyn) 2679 if (tsyn)
2613 tx_flags |= I40E_TX_FLAGS_TSYN; 2680 tx_flags |= I40E_TX_FLAGS_TSYN;
2614 2681
2682 if (i40e_chk_linearize(skb, tx_flags, hdr_len))
2683 if (skb_linearize(skb))
2684 goto out_drop;
2685
2615 skb_tx_timestamp(skb); 2686 skb_tx_timestamp(skb);
2616 2687
2617 /* always enable CRC insertion offload */ 2688 /* always enable CRC insertion offload */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 38449b230d60..4b0b8102cdc3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -120,6 +120,7 @@ enum i40e_dyn_idx_t {
120 120
121#define i40e_rx_desc i40e_32byte_rx_desc 121#define i40e_rx_desc i40e_32byte_rx_desc
122 122
123#define I40E_MAX_BUFFER_TXD 8
123#define I40E_MIN_TX_LEN 17 124#define I40E_MIN_TX_LEN 17
124#define I40E_MAX_DATA_PER_TXD 8192 125#define I40E_MAX_DATA_PER_TXD 8192
125 126
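
The reworked i40e_get_tx_pending() computes outstanding descriptors from the head write-back value and the tail register rather than from next_to_clean/next_to_use, adding the wraparound case where tail has passed the end of the ring. The arithmetic can be checked standalone:

	#include <assert.h>
	#include <stdio.h>

	/* Pending work is tail - head, adjusted by the ring size when tail
	 * has wrapped past head; equal indices mean the ring is drained. */
	static unsigned pending(unsigned head, unsigned tail, unsigned count)
	{
		if (head == tail)
			return 0;
		return head < tail ? tail - head : tail + count - head;
	}

	int main(void)
	{
		assert(pending(10, 10, 512) == 0);   /* caught up */
		assert(pending(10, 42, 512) == 32);  /* simple case */
		assert(pending(500, 20, 512) == 32); /* tail wrapped */
		puts("ok");
		return 0;
	}
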
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index d2ff862f0726..fe13ad2def46 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -127,6 +127,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
127} 127}
128 128
129/** 129/**
130 * i40e_get_head - Retrieve head from head writeback
131 * @tx_ring: tx ring to fetch head of
132 *
133 * Returns value of Tx ring head based on value stored
134 * in head write-back location
135 **/
136static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
137{
138 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
139
140 return le32_to_cpu(*(volatile __le32 *)head);
141}
142
143/**
130 * i40e_get_tx_pending - how many tx descriptors not processed 144 * i40e_get_tx_pending - how many tx descriptors not processed
131 * @tx_ring: the ring of descriptors 145 * @tx_ring: the ring of descriptors
132 * 146 *
@@ -135,10 +149,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
135 **/ 149 **/
136static u32 i40e_get_tx_pending(struct i40e_ring *ring) 150static u32 i40e_get_tx_pending(struct i40e_ring *ring)
137{ 151{
138 u32 ntu = ((ring->next_to_clean <= ring->next_to_use) 152 u32 head, tail;
139 ? ring->next_to_use 153
140 : ring->next_to_use + ring->count); 154 head = i40e_get_head(ring);
141 return ntu - ring->next_to_clean; 155 tail = readl(ring->tail);
156
157 if (head != tail)
158 return (head < tail) ?
159 tail - head : (tail + ring->count - head);
160
161 return 0;
142} 162}
143 163
144/** 164/**
@@ -147,6 +167,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
147 **/ 167 **/
148static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) 168static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
149{ 169{
170 u32 tx_done = tx_ring->stats.packets;
171 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
150 u32 tx_pending = i40e_get_tx_pending(tx_ring); 172 u32 tx_pending = i40e_get_tx_pending(tx_ring);
151 bool ret = false; 173 bool ret = false;
152 174
@@ -163,36 +185,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
163 * run the check_tx_hang logic with a transmit completion 185 * run the check_tx_hang logic with a transmit completion
164 * pending but without time to complete it yet. 186 * pending but without time to complete it yet.
165 */ 187 */
166 if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && 188 if ((tx_done_old == tx_done) && tx_pending) {
167 (tx_pending >= I40E_MIN_DESC_PENDING)) {
168 /* make sure it is true for two checks in a row */ 189 /* make sure it is true for two checks in a row */
169 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, 190 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
170 &tx_ring->state); 191 &tx_ring->state);
171 } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) || 192 } else if (tx_done_old == tx_done &&
172 !(tx_pending < I40E_MIN_DESC_PENDING) || 193 (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
173 !(tx_pending > 0)) {
174 /* update completed stats and disarm the hang check */ 194 /* update completed stats and disarm the hang check */
175 tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; 195 tx_ring->tx_stats.tx_done_old = tx_done;
176 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); 196 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
177 } 197 }
178 198
179 return ret; 199 return ret;
180} 200}
181 201
182/**
183 * i40e_get_head - Retrieve head from head writeback
184 * @tx_ring: tx ring to fetch head of
185 *
186 * Returns value of Tx ring head based on value stored
187 * in head write-back location
188 **/
189static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
190{
191 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
192
193 return le32_to_cpu(*(volatile __le32 *)head);
194}
195
196#define WB_STRIDE 0x3 202#define WB_STRIDE 0x3
197 203
198/** 204/**
@@ -1405,17 +1411,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
1405 if (err < 0) 1411 if (err < 0)
1406 return err; 1412 return err;
1407 1413
1408 if (protocol == htons(ETH_P_IP)) { 1414 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
1409 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); 1415 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
1416
1417 if (iph->version == 4) {
1410 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); 1418 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1411 iph->tot_len = 0; 1419 iph->tot_len = 0;
1412 iph->check = 0; 1420 iph->check = 0;
1413 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 1421 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1414 0, IPPROTO_TCP, 0); 1422 0, IPPROTO_TCP, 0);
1415 } else if (skb_is_gso_v6(skb)) { 1423 } else if (ipv6h->version == 6) {
1416
1417 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
1418 : ipv6_hdr(skb);
1419 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); 1424 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1420 ipv6h->payload_len = 0; 1425 ipv6h->payload_len = 0;
1421 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 1426 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1473,13 +1478,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
1473 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; 1478 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1474 } 1479 }
1475 } else if (tx_flags & I40E_TX_FLAGS_IPV6) { 1480 } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
1476 if (tx_flags & I40E_TX_FLAGS_TSO) { 1481 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
1477 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; 1482 if (tx_flags & I40E_TX_FLAGS_TSO)
1478 ip_hdr(skb)->check = 0; 1483 ip_hdr(skb)->check = 0;
1479 } else {
1480 *cd_tunneling |=
1481 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1482 }
1483 } 1484 }
1484 1485
1485 /* Now set the ctx descriptor fields */ 1486 /* Now set the ctx descriptor fields */
@@ -1489,6 +1490,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
1489 ((skb_inner_network_offset(skb) - 1490 ((skb_inner_network_offset(skb) -
1490 skb_transport_offset(skb)) >> 1) << 1491 skb_transport_offset(skb)) >> 1) <<
1491 I40E_TXD_CTX_QW0_NATLEN_SHIFT; 1492 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
1493 if (this_ip_hdr->version == 6) {
1494 tx_flags &= ~I40E_TX_FLAGS_IPV4;
1495 tx_flags |= I40E_TX_FLAGS_IPV6;
1496 }
1497
1492 1498
1493 } else { 1499 } else {
1494 network_hdr_len = skb_network_header_len(skb); 1500 network_hdr_len = skb_network_header_len(skb);
@@ -1579,6 +1585,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1579 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); 1585 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
1580} 1586}
1581 1587
1588 /**
1589 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
1590 * @skb: send buffer
1591 * @tx_flags: collected send information
1592 * @hdr_len: size of the packet header
1593 *
1594 * Note: Our HW can't scatter-gather more than 8 fragments to build
1595 * a packet on the wire and so we need to figure out the cases where we
1596 * need to linearize the skb.
1597 **/
1598static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
1599 const u8 hdr_len)
1600{
1601 struct skb_frag_struct *frag;
1602 bool linearize = false;
1603 unsigned int size = 0;
1604 u16 num_frags;
1605 u16 gso_segs;
1606
1607 num_frags = skb_shinfo(skb)->nr_frags;
1608 gso_segs = skb_shinfo(skb)->gso_segs;
1609
1610 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
1611 u16 j = 1;
1612
1613 if (num_frags < (I40E_MAX_BUFFER_TXD))
1614 goto linearize_chk_done;
1615 /* try the simple math, if we have too many frags per segment */
1616 if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
1617 I40E_MAX_BUFFER_TXD) {
1618 linearize = true;
1619 goto linearize_chk_done;
1620 }
1621 frag = &skb_shinfo(skb)->frags[0];
1622 size = hdr_len;
1623 /* we might still have more fragments per segment */
1624 do {
1625 size += skb_frag_size(frag);
1626 frag++; j++;
1627 if (j == I40E_MAX_BUFFER_TXD) {
1628 if (size < skb_shinfo(skb)->gso_size) {
1629 linearize = true;
1630 break;
1631 }
1632 j = 1;
1633 size -= skb_shinfo(skb)->gso_size;
1634 if (size)
1635 j++;
1636 size += hdr_len;
1637 }
1638 num_frags--;
1639 } while (num_frags);
1640 } else {
1641 if (num_frags >= I40E_MAX_BUFFER_TXD)
1642 linearize = true;
1643 }
1644
1645linearize_chk_done:
1646 return linearize;
1647}
1648
1582/** 1649/**
1583 * i40e_tx_map - Build the Tx descriptor 1650 * i40e_tx_map - Build the Tx descriptor
1584 * @tx_ring: ring to send buffer on 1651 * @tx_ring: ring to send buffer on
@@ -1853,6 +1920,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
1853 else if (tso) 1920 else if (tso)
1854 tx_flags |= I40E_TX_FLAGS_TSO; 1921 tx_flags |= I40E_TX_FLAGS_TSO;
1855 1922
1923 if (i40e_chk_linearize(skb, tx_flags, hdr_len))
1924 if (skb_linearize(skb))
1925 goto out_drop;
1926
1856 skb_tx_timestamp(skb); 1927 skb_tx_timestamp(skb);
1857 1928
1858 /* always enable CRC insertion offload */ 1929 /* always enable CRC insertion offload */
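
The cheap first-pass test in i40e_chk_linearize() above divides the fragment count (plus one header fragment per segment) by the number of GSO segments to estimate descriptors per segment before walking the frag list. A minimal standalone sketch of just that arithmetic, with hypothetical values, assuming the in-tree I40E_MAX_BUFFER_TXD limit of 8:

    #include <stdbool.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
    #define I40E_MAX_BUFFER_TXD 8

    /* Cheap first pass: average frags per GSO segment (one header
     * fragment counted per segment) must fit in 8 descriptors. */
    static bool needs_linearize(unsigned int num_frags, unsigned int gso_segs)
    {
        if (num_frags < I40E_MAX_BUFFER_TXD)
            return false;
        return DIV_ROUND_UP(num_frags + gso_segs, gso_segs) >
               I40E_MAX_BUFFER_TXD;
    }

    int main(void)
    {
        /* 17 frags over 2 segments -> ceil(19/2) = 10 > 8: linearize */
        printf("%d\n", needs_linearize(17, 2));
        /* 16 frags over 4 segments -> ceil(20/4) = 5 <= 8: keep frags */
        printf("%d\n", needs_linearize(16, 4));
        return 0;
    }
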
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index ffdda716813e..1e49bb1fbac1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -120,6 +120,7 @@ enum i40e_dyn_idx_t {
120 120
121#define i40e_rx_desc i40e_32byte_rx_desc 121#define i40e_rx_desc i40e_32byte_rx_desc
122 122
123#define I40E_MAX_BUFFER_TXD 8
123#define I40E_MIN_TX_LEN 17 124#define I40E_MIN_TX_LEN 17
124#define I40E_MAX_DATA_PER_TXD 8192 125#define I40E_MAX_DATA_PER_TXD 8192
125 126
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 4e789479f00f..b66e03d9711f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -81,12 +81,14 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
81{ 81{
82 u32 loopback_ok = 0; 82 u32 loopback_ok = 0;
83 int i; 83 int i;
84 84 bool gro_enabled;
85 85
86 priv->loopback_ok = 0; 86 priv->loopback_ok = 0;
87 priv->validate_loopback = 1; 87 priv->validate_loopback = 1;
88 gro_enabled = priv->dev->features & NETIF_F_GRO;
88 89
89 mlx4_en_update_loopback_state(priv->dev, priv->dev->features); 90 mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
91 priv->dev->features &= ~NETIF_F_GRO;
90 92
91 /* xmit */ 93 /* xmit */
92 if (mlx4_en_test_loopback_xmit(priv)) { 94 if (mlx4_en_test_loopback_xmit(priv)) {
@@ -108,6 +110,10 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
108mlx4_en_test_loopback_exit: 110mlx4_en_test_loopback_exit:
109 111
110 priv->validate_loopback = 0; 112 priv->validate_loopback = 0;
113
114 if (gro_enabled)
115 priv->dev->features |= NETIF_F_GRO;
116
111 mlx4_en_update_loopback_state(priv->dev, priv->dev->features); 117 mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
112 return !loopback_ok; 118 return !loopback_ok;
113} 119}
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2bb8553bd905..eda29dbbfcd2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -412,7 +412,6 @@ err_icm:
412 412
413EXPORT_SYMBOL_GPL(mlx4_qp_alloc); 413EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
414 414
415#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
416int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, 415int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
417 enum mlx4_update_qp_attr attr, 416 enum mlx4_update_qp_attr attr,
418 struct mlx4_update_qp_params *params) 417 struct mlx4_update_qp_params *params)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 486e3d26cd4a..d97ca88c55b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -713,7 +713,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
713 struct mlx4_vport_oper_state *vp_oper; 713 struct mlx4_vport_oper_state *vp_oper;
714 struct mlx4_priv *priv; 714 struct mlx4_priv *priv;
715 u32 qp_type; 715 u32 qp_type;
716 int port; 716 int port, err = 0;
717 717
718 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; 718 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
719 priv = mlx4_priv(dev); 719 priv = mlx4_priv(dev);
@@ -738,7 +738,9 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
738 } else { 738 } else {
739 struct mlx4_update_qp_params params = {.flags = 0}; 739 struct mlx4_update_qp_params params = {.flags = 0};
740 740
741 mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params); 741 err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
742 if (err)
743 goto out;
742 } 744 }
743 } 745 }
744 746
@@ -773,7 +775,8 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
773 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; 775 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
774 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; 776 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
775 } 777 }
776 return 0; 778out:
779 return err;
777} 780}
778 781
779static int mpt_mask(struct mlx4_dev *dev) 782static int mpt_mask(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 44e8d7d25547..57a6e6cd74fc 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1239,11 +1239,9 @@ static int pasemi_mac_open(struct net_device *dev)
1239 if (mac->phydev) 1239 if (mac->phydev)
1240 phy_start(mac->phydev); 1240 phy_start(mac->phydev);
1241 1241
1242 init_timer(&mac->tx->clean_timer); 1242 setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
1243 mac->tx->clean_timer.function = pasemi_mac_tx_timer; 1243 (unsigned long)mac->tx);
1244 mac->tx->clean_timer.data = (unsigned long)mac->tx; 1244 mod_timer(&mac->tx->clean_timer, jiffies + HZ);
1245 mac->tx->clean_timer.expires = jiffies+HZ;
1246 add_timer(&mac->tx->clean_timer);
1247 1245
1248 return 0; 1246 return 0;
1249 1247
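
Several conversions in this merge (pasemi_mac above, smc91c92_cs and stmmac below) replace the open-coded init_timer() sequence with setup_timer() plus mod_timer(). A hedged before/after sketch against the pre-4.15 timer API, where callbacks take an unsigned long context; the function and context names are hypothetical:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static void my_timer_fn(unsigned long data)
    {
        /* 'data' carries the context pointer in this API generation */
    }

    /* Before: four statements, expires set by hand */
    static void arm_timer_old(struct timer_list *t, unsigned long ctx)
    {
        init_timer(t);
        t->function = my_timer_fn;
        t->data = ctx;
        t->expires = jiffies + HZ;
        add_timer(t);
    }

    /* After: setup_timer() binds fn/data once, mod_timer() arms it */
    static void arm_timer_new(struct timer_list *t, unsigned long ctx)
    {
        setup_timer(t, my_timer_fn, ctx);
        mod_timer(t, jiffies + HZ);
    }
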
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 6e426ae94692..0a5e204a0179 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -354,7 +354,7 @@ struct cmd_desc_type0 {
354 354
355} __attribute__ ((aligned(64))); 355} __attribute__ ((aligned(64)));
356 356
357/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */ 357/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
358struct rcv_desc { 358struct rcv_desc {
359 __le16 reference_handle; 359 __le16 reference_handle;
360 __le16 reserved; 360 __le16 reserved;
@@ -499,7 +499,7 @@ struct uni_data_desc{
499#define NETXEN_IMAGE_START 0x43000 /* compressed image */ 499#define NETXEN_IMAGE_START 0x43000 /* compressed image */
500#define NETXEN_SECONDARY_START 0x200000 /* backup images */ 500#define NETXEN_SECONDARY_START 0x200000 /* backup images */
501#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */ 501#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */
502#define NETXEN_USER_START 0x3E8000 /* Firmare info */ 502#define NETXEN_USER_START 0x3E8000 /* Firmware info */
503#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */ 503#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */
504#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */ 504#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */
505 505
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index fa4317611fd6..f221126a5c4e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -314,7 +314,7 @@ struct qlcnic_fdt {
314#define QLCNIC_BRDCFG_START 0x4000 /* board config */ 314#define QLCNIC_BRDCFG_START 0x4000 /* board config */
315#define QLCNIC_BOOTLD_START 0x10000 /* bootld */ 315#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
316#define QLCNIC_IMAGE_START 0x43000 /* compressed image */ 316#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
317#define QLCNIC_USER_START 0x3E8000 /* Firmare info */ 317#define QLCNIC_USER_START 0x3E8000 /* Firmware info */
318 318
319#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408) 319#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
320#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c) 320#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index ad0020af2193..c70ab40d8698 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2561,7 +2561,7 @@ static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2561 int rc = -EINVAL; 2561 int rc = -EINVAL;
2562 2562
2563 if (!rtl_fw_format_ok(tp, rtl_fw)) { 2563 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2564 netif_err(tp, ifup, dev, "invalid firwmare\n"); 2564 netif_err(tp, ifup, dev, "invalid firmware\n");
2565 goto out; 2565 goto out;
2566 } 2566 }
2567 2567
@@ -5067,8 +5067,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
5067 RTL_W8(ChipCmd, CmdReset); 5067 RTL_W8(ChipCmd, CmdReset);
5068 5068
5069 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); 5069 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
5070
5071 netdev_reset_queue(tp->dev);
5072} 5070}
5073 5071
5074static void rtl_request_uncached_firmware(struct rtl8169_private *tp) 5072static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -7049,7 +7047,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
7049 u32 status, len; 7047 u32 status, len;
7050 u32 opts[2]; 7048 u32 opts[2];
7051 int frags; 7049 int frags;
7052 bool stop_queue;
7053 7050
7054 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { 7051 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
7055 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); 7052 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
@@ -7090,8 +7087,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
7090 7087
7091 txd->opts2 = cpu_to_le32(opts[1]); 7088 txd->opts2 = cpu_to_le32(opts[1]);
7092 7089
7093 netdev_sent_queue(dev, skb->len);
7094
7095 skb_tx_timestamp(skb); 7090 skb_tx_timestamp(skb);
7096 7091
7097 /* Force memory writes to complete before releasing descriptor */ 7092 /* Force memory writes to complete before releasing descriptor */
@@ -7106,16 +7101,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
7106 7101
7107 tp->cur_tx += frags + 1; 7102 tp->cur_tx += frags + 1;
7108 7103
7109 stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS); 7104 RTL_W8(TxPoll, NPQ);
7110 7105
7111 if (!skb->xmit_more || stop_queue || 7106 mmiowb();
7112 netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) {
7113 RTL_W8(TxPoll, NPQ);
7114
7115 mmiowb();
7116 }
7117 7107
7118 if (stop_queue) { 7108 if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
7119 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must 7109 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
7120 * not miss a ring update when it notices a stopped queue. 7110 * not miss a ring update when it notices a stopped queue.
7121 */ 7111 */
@@ -7198,7 +7188,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
7198static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) 7188static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
7199{ 7189{
7200 unsigned int dirty_tx, tx_left; 7190 unsigned int dirty_tx, tx_left;
7201 unsigned int bytes_compl = 0, pkts_compl = 0;
7202 7191
7203 dirty_tx = tp->dirty_tx; 7192 dirty_tx = tp->dirty_tx;
7204 smp_rmb(); 7193 smp_rmb();
@@ -7222,8 +7211,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
7222 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, 7211 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
7223 tp->TxDescArray + entry); 7212 tp->TxDescArray + entry);
7224 if (status & LastFrag) { 7213 if (status & LastFrag) {
7225 pkts_compl++; 7214 u64_stats_update_begin(&tp->tx_stats.syncp);
7226 bytes_compl += tx_skb->skb->len; 7215 tp->tx_stats.packets++;
7216 tp->tx_stats.bytes += tx_skb->skb->len;
7217 u64_stats_update_end(&tp->tx_stats.syncp);
7227 dev_kfree_skb_any(tx_skb->skb); 7218 dev_kfree_skb_any(tx_skb->skb);
7228 tx_skb->skb = NULL; 7219 tx_skb->skb = NULL;
7229 } 7220 }
@@ -7232,13 +7223,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
7232 } 7223 }
7233 7224
7234 if (tp->dirty_tx != dirty_tx) { 7225 if (tp->dirty_tx != dirty_tx) {
7235 netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
7236
7237 u64_stats_update_begin(&tp->tx_stats.syncp);
7238 tp->tx_stats.packets += pkts_compl;
7239 tp->tx_stats.bytes += bytes_compl;
7240 u64_stats_update_end(&tp->tx_stats.syncp);
7241
7242 tp->dirty_tx = dirty_tx; 7226 tp->dirty_tx = dirty_tx;
7243 /* Sync with rtl8169_start_xmit: 7227 /* Sync with rtl8169_start_xmit:
7244 * - publish dirty_tx ring index (write barrier) 7228 * - publish dirty_tx ring index (write barrier)
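
The r8169 hunks above back out both the Byte Queue Limits accounting and the xmit_more doorbell deferral, folding the stats update back into rtl_tx(). For contrast, a kernel-style sketch (not a standalone program; driver function names hypothetical) of the BQL pairing being removed — every byte reported via netdev_sent_queue() must eventually be reported completed, or the queue reset, otherwise the stack throttles the transmit queue:

    #include <linux/netdevice.h>

    /* xmit path: account bytes handed to the hardware */
    static void my_xmit_account(struct net_device *dev, unsigned int len)
    {
        netdev_sent_queue(dev, len);
    }

    /* completion path: account what the hardware finished */
    static void my_tx_complete(struct net_device *dev,
                               unsigned int pkts, unsigned int bytes)
    {
        netdev_completed_queue(dev, pkts, bytes);
    }

    /* reset path: forget all outstanding accounting */
    static void my_tx_reset(struct net_device *dev)
    {
        netdev_reset_queue(dev);
    }
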
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 4da8bd263997..736d5d1624a1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -508,7 +508,6 @@ static struct sh_eth_cpu_data r8a779x_data = {
508 .tpauser = 1, 508 .tpauser = 1,
509 .hw_swap = 1, 509 .hw_swap = 1,
510 .rmiimode = 1, 510 .rmiimode = 1,
511 .shift_rd0 = 1,
512}; 511};
513 512
514static void sh_eth_set_rate_sh7724(struct net_device *ndev) 513static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -1392,6 +1391,9 @@ static void sh_eth_dev_exit(struct net_device *ndev)
1392 msleep(2); /* max frame time at 10 Mbps < 1250 us */ 1391 msleep(2); /* max frame time at 10 Mbps < 1250 us */
1393 sh_eth_get_stats(ndev); 1392 sh_eth_get_stats(ndev);
1394 sh_eth_reset(ndev); 1393 sh_eth_reset(ndev);
1394
1395 /* Set MAC address again */
1396 update_mac_address(ndev);
1395} 1397}
1396 1398
1397/* free Tx skb function */ 1399/* free Tx skb function */
@@ -1407,6 +1409,8 @@ static int sh_eth_txfree(struct net_device *ndev)
1407 txdesc = &mdp->tx_ring[entry]; 1409 txdesc = &mdp->tx_ring[entry];
1408 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) 1410 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
1409 break; 1411 break;
1412 /* TACT bit must be checked before all the following reads */
1413 rmb();
1410 /* Free the original skb. */ 1414 /* Free the original skb. */
1411 if (mdp->tx_skbuff[entry]) { 1415 if (mdp->tx_skbuff[entry]) {
1412 dma_unmap_single(&ndev->dev, txdesc->addr, 1416 dma_unmap_single(&ndev->dev, txdesc->addr,
@@ -1444,6 +1448,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1444 limit = boguscnt; 1448 limit = boguscnt;
1445 rxdesc = &mdp->rx_ring[entry]; 1449 rxdesc = &mdp->rx_ring[entry];
1446 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { 1450 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
1451 /* RACT bit must be checked before all the following reads */
1452 rmb();
1447 desc_status = edmac_to_cpu(mdp, rxdesc->status); 1453 desc_status = edmac_to_cpu(mdp, rxdesc->status);
1448 pkt_len = rxdesc->frame_length; 1454 pkt_len = rxdesc->frame_length;
1449 1455
@@ -1455,8 +1461,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1455 1461
1456 /* In case of almost all GETHER/ETHERs, the Receive Frame State 1462 /* In case of almost all GETHER/ETHERs, the Receive Frame State
1457 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to 1463 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
1458 * bit 0. However, in case of the R8A7740, R8A779x, and 1464 * bit 0. However, in case of the R8A7740 and R7S72100
1459 * R7S72100 the RFS bits are from bit 25 to bit 16. So, the 1465 * the RFS bits are from bit 25 to bit 16. So, the
1460 * driver needs right shifting by 16. 1466 * driver needs right shifting by 16.
1461 */ 1467 */
1462 if (mdp->cd->shift_rd0) 1468 if (mdp->cd->shift_rd0)
@@ -1523,6 +1529,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1523 skb_checksum_none_assert(skb); 1529 skb_checksum_none_assert(skb);
1524 rxdesc->addr = dma_addr; 1530 rxdesc->addr = dma_addr;
1525 } 1531 }
1532 wmb(); /* RACT bit must be set after all the above writes */
1526 if (entry >= mdp->num_rx_ring - 1) 1533 if (entry >= mdp->num_rx_ring - 1)
1527 rxdesc->status |= 1534 rxdesc->status |=
1528 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); 1535 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
@@ -1535,7 +1542,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1535 /* If we don't need to check status, don't. -KDU */ 1542 /* If we don't need to check status, don't. -KDU */
1536 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { 1543 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1537 /* fix the values for the next receiving if RDE is set */ 1544 /* fix the values for the next receiving if RDE is set */
1538 if (intr_status & EESR_RDE) { 1545 if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) {
1539 u32 count = (sh_eth_read(ndev, RDFAR) - 1546 u32 count = (sh_eth_read(ndev, RDFAR) -
1540 sh_eth_read(ndev, RDLAR)) >> 4; 1547 sh_eth_read(ndev, RDLAR)) >> 4;
1541 1548
@@ -2174,7 +2181,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2174 } 2181 }
2175 spin_unlock_irqrestore(&mdp->lock, flags); 2182 spin_unlock_irqrestore(&mdp->lock, flags);
2176 2183
2177 if (skb_padto(skb, ETH_ZLEN)) 2184 if (skb_put_padto(skb, ETH_ZLEN))
2178 return NETDEV_TX_OK; 2185 return NETDEV_TX_OK;
2179 2186
2180 entry = mdp->cur_tx % mdp->num_tx_ring; 2187 entry = mdp->cur_tx % mdp->num_tx_ring;
@@ -2192,6 +2199,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2192 } 2199 }
2193 txdesc->buffer_length = skb->len; 2200 txdesc->buffer_length = skb->len;
2194 2201
2202 wmb(); /* TACT bit must be set after all the above writes */
2195 if (entry >= mdp->num_tx_ring - 1) 2203 if (entry >= mdp->num_tx_ring - 1)
2196 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); 2204 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
2197 else 2205 else
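
The sh_eth hunks tighten the descriptor ownership protocol: rmb() after seeing TACT/RACT clear and before reading the rest of the descriptor, wmb() after filling a descriptor and before handing it back to the DMA engine; the same file also moves from skb_padto() to skb_put_padto() so the zero padding is counted in skb->len. A condensed kernel-style sketch of the barrier pattern, with a hypothetical descriptor layout:

    #include <linux/types.h>
    #include <asm/barrier.h>

    struct desc {
        u32 status;        /* OWN bit: hardware owns the descriptor when set */
        u32 addr;
        u32 len;
    };

    #define DESC_OWN 0x80000000u

    static bool reap_desc(struct desc *d, u32 *addr, u32 *len)
    {
        if (d->status & DESC_OWN)
            return false;  /* still owned by hardware */
        rmb();             /* OWN must be checked before the reads below */
        *addr = d->addr;
        *len = d->len;
        return true;
    }

    static void post_desc(struct desc *d, u32 addr, u32 len)
    {
        d->addr = addr;
        d->len = len;
        wmb();             /* OWN must be set after the writes above */
        d->status |= DESC_OWN;
    }
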
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index e5a15a4c4e8f..a5d1e6ea7d58 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -1280,9 +1280,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
1280 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); 1280 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1281 1281
1282 if (enable) 1282 if (enable)
1283 val |= 1 << rocker_port->pport; 1283 val |= 1ULL << rocker_port->pport;
1284 else 1284 else
1285 val &= ~(1 << rocker_port->pport); 1285 val &= ~(1ULL << rocker_port->pport);
1286 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); 1286 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1287} 1287}
1288 1288
@@ -4241,6 +4241,8 @@ static int rocker_probe_ports(struct rocker *rocker)
4241 4241
4242 alloc_size = sizeof(struct rocker_port *) * rocker->port_count; 4242 alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
4243 rocker->ports = kmalloc(alloc_size, GFP_KERNEL); 4243 rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
4244 if (!rocker->ports)
4245 return -ENOMEM;
4244 for (i = 0; i < rocker->port_count; i++) { 4246 for (i = 0; i < rocker->port_count; i++) {
4245 err = rocker_probe_port(rocker, i); 4247 err = rocker_probe_port(rocker, i);
4246 if (err) 4248 if (err)
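
The rocker change from '1' to '1ULL' matters because the constant 1 is a 32-bit int: once pport reaches the width of int, the shift is undefined behavior and the intended bit in the 64-bit enable mask is never set. A standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int port = 40;
        uint64_t wide = 1ULL << port;  /* correct: bit 40 set */

        printf("1ULL << %u = %#llx\n", port, (unsigned long long)wide);
        /* '1 << 40' would shift a 32-bit int past its width:
         * undefined behavior in C, and in practice not bit 40. */
        return 0;
    }
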
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 6b33127ab352..3449893aea8d 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1070,11 +1070,8 @@ static int smc_open(struct net_device *dev)
1070 smc->packets_waiting = 0; 1070 smc->packets_waiting = 0;
1071 1071
1072 smc_reset(dev); 1072 smc_reset(dev);
1073 init_timer(&smc->media); 1073 setup_timer(&smc->media, media_check, (u_long)dev);
1074 smc->media.function = media_check; 1074 mod_timer(&smc->media, jiffies + HZ);
1075 smc->media.data = (u_long) dev;
1076 smc->media.expires = jiffies + HZ;
1077 add_timer(&smc->media);
1078 1075
1079 return 0; 1076 return 0;
1080} /* smc_open */ 1077} /* smc_open */
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 88a55f95fe09..209ee1b27f8d 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -91,6 +91,10 @@ static const char version[] =
91 91
92#include "smc91x.h" 92#include "smc91x.h"
93 93
94#if defined(CONFIG_ASSABET_NEPONSET)
95#include <mach/neponset.h>
96#endif
97
94#ifndef SMC_NOWAIT 98#ifndef SMC_NOWAIT
95# define SMC_NOWAIT 0 99# define SMC_NOWAIT 0
96#endif 100#endif
@@ -2355,8 +2359,9 @@ static int smc_drv_probe(struct platform_device *pdev)
2355 ret = smc_request_attrib(pdev, ndev); 2359 ret = smc_request_attrib(pdev, ndev);
2356 if (ret) 2360 if (ret)
2357 goto out_release_io; 2361 goto out_release_io;
2358#if defined(CONFIG_SA1100_ASSABET) 2362#if defined(CONFIG_ASSABET_NEPONSET)
2359 neponset_ncr_set(NCR_ENET_OSC_EN); 2363 if (machine_is_assabet() && machine_has_neponset())
2364 neponset_ncr_set(NCR_ENET_OSC_EN);
2360#endif 2365#endif
2361 platform_set_drvdata(pdev, ndev); 2366 platform_set_drvdata(pdev, ndev);
2362 ret = smc_enable_device(pdev); 2367 ret = smc_enable_device(pdev);
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index be67baf5f677..3a18501d1068 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -39,14 +39,7 @@
39 * Define your architecture specific bus configuration parameters here. 39 * Define your architecture specific bus configuration parameters here.
40 */ 40 */
41 41
42#if defined(CONFIG_ARCH_LUBBOCK) ||\ 42#if defined(CONFIG_ARM)
43 defined(CONFIG_MACH_MAINSTONE) ||\
44 defined(CONFIG_MACH_ZYLONITE) ||\
45 defined(CONFIG_MACH_LITTLETON) ||\
46 defined(CONFIG_MACH_ZYLONITE2) ||\
47 defined(CONFIG_ARCH_VIPER) ||\
48 defined(CONFIG_MACH_STARGATE2) ||\
49 defined(CONFIG_ARCH_VERSATILE)
50 43
51#include <asm/mach-types.h> 44#include <asm/mach-types.h>
52 45
@@ -74,95 +67,8 @@
74/* We actually can't write halfwords properly if not word aligned */ 67/* We actually can't write halfwords properly if not word aligned */
75static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) 68static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
76{ 69{
77 if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) { 70 if ((machine_is_mainstone() || machine_is_stargate2() ||
78 unsigned int v = val << 16; 71 machine_is_pxa_idp()) && reg & 2) {
79 v |= readl(ioaddr + (reg & ~2)) & 0xffff;
80 writel(v, ioaddr + (reg & ~2));
81 } else {
82 writew(val, ioaddr + reg);
83 }
84}
85
86#elif defined(CONFIG_SA1100_PLEB)
87/* We can only do 16-bit reads and writes in the static memory space. */
88#define SMC_CAN_USE_8BIT 1
89#define SMC_CAN_USE_16BIT 1
90#define SMC_CAN_USE_32BIT 0
91#define SMC_IO_SHIFT 0
92#define SMC_NOWAIT 1
93
94#define SMC_inb(a, r) readb((a) + (r))
95#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
96#define SMC_inw(a, r) readw((a) + (r))
97#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
98#define SMC_outb(v, a, r) writeb(v, (a) + (r))
99#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
100#define SMC_outw(v, a, r) writew(v, (a) + (r))
101#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
102
103#define SMC_IRQ_FLAGS (-1)
104
105#elif defined(CONFIG_SA1100_ASSABET)
106
107#include <mach/neponset.h>
108
109/* We can only do 8-bit reads and writes in the static memory space. */
110#define SMC_CAN_USE_8BIT 1
111#define SMC_CAN_USE_16BIT 0
112#define SMC_CAN_USE_32BIT 0
113#define SMC_NOWAIT 1
114
115/* The first two address lines aren't connected... */
116#define SMC_IO_SHIFT 2
117
118#define SMC_inb(a, r) readb((a) + (r))
119#define SMC_outb(v, a, r) writeb(v, (a) + (r))
120#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
121#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
122#define SMC_IRQ_FLAGS (-1) /* from resource */
123
124#elif defined(CONFIG_MACH_LOGICPD_PXA270) || \
125 defined(CONFIG_MACH_NOMADIK_8815NHK)
126
127#define SMC_CAN_USE_8BIT 0
128#define SMC_CAN_USE_16BIT 1
129#define SMC_CAN_USE_32BIT 0
130#define SMC_IO_SHIFT 0
131#define SMC_NOWAIT 1
132
133#define SMC_inw(a, r) readw((a) + (r))
134#define SMC_outw(v, a, r) writew(v, (a) + (r))
135#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
136#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
137
138#elif defined(CONFIG_ARCH_INNOKOM) || \
139 defined(CONFIG_ARCH_PXA_IDP) || \
140 defined(CONFIG_ARCH_RAMSES) || \
141 defined(CONFIG_ARCH_PCM027)
142
143#define SMC_CAN_USE_8BIT 1
144#define SMC_CAN_USE_16BIT 1
145#define SMC_CAN_USE_32BIT 1
146#define SMC_IO_SHIFT 0
147#define SMC_NOWAIT 1
148#define SMC_USE_PXA_DMA 1
149
150#define SMC_inb(a, r) readb((a) + (r))
151#define SMC_inw(a, r) readw((a) + (r))
152#define SMC_inl(a, r) readl((a) + (r))
153#define SMC_outb(v, a, r) writeb(v, (a) + (r))
154#define SMC_outl(v, a, r) writel(v, (a) + (r))
155#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
156#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
157#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
158#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
159#define SMC_IRQ_FLAGS (-1) /* from resource */
160
161/* We actually can't write halfwords properly if not word aligned */
162static inline void
163SMC_outw(u16 val, void __iomem *ioaddr, int reg)
164{
165 if (reg & 2) {
166 unsigned int v = val << 16; 72 unsigned int v = val << 16;
167 v |= readl(ioaddr + (reg & ~2)) & 0xffff; 73 v |= readl(ioaddr + (reg & ~2)) & 0xffff;
168 writel(v, ioaddr + (reg & ~2)); 74 writel(v, ioaddr + (reg & ~2));
@@ -237,20 +143,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
237#define RPC_LSA_DEFAULT RPC_LED_100_10 143#define RPC_LSA_DEFAULT RPC_LED_100_10
238#define RPC_LSB_DEFAULT RPC_LED_TX_RX 144#define RPC_LSB_DEFAULT RPC_LED_TX_RX
239 145
240#elif defined(CONFIG_ARCH_MSM)
241
242#define SMC_CAN_USE_8BIT 0
243#define SMC_CAN_USE_16BIT 1
244#define SMC_CAN_USE_32BIT 0
245#define SMC_NOWAIT 1
246
247#define SMC_inw(a, r) readw((a) + (r))
248#define SMC_outw(v, a, r) writew(v, (a) + (r))
249#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
250#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
251
252#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH
253
254#elif defined(CONFIG_COLDFIRE) 146#elif defined(CONFIG_COLDFIRE)
255 147
256#define SMC_CAN_USE_8BIT 0 148#define SMC_CAN_USE_8BIT 0
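
The surviving SMC_outw() helper (now gated on machine_is_*() checks rather than a per-board #ifdef maze) emulates a 16-bit store at an odd halfword offset on buses that only handle aligned 32-bit writes: read the containing word, splice the new halfword into the top half, write the word back. A standalone little-endian simulation over a plain buffer:

    #include <stdint.h>
    #include <stdio.h>

    /* Emulate a 16-bit write at byte offset 'reg' on a bus that
     * only supports aligned 32-bit accesses (little-endian). */
    static void outw_rmw(uint32_t *bank, unsigned int reg, uint16_t val)
    {
        if (reg & 2) {
            uint32_t v = (uint32_t)val << 16;   /* upper halfword */
            v |= bank[reg / 4] & 0xffff;        /* keep lower halfword */
            bank[reg / 4] = v;
        } else {
            bank[reg / 4] = (bank[reg / 4] & 0xffff0000u) | val;
        }
    }

    int main(void)
    {
        uint32_t bank[2] = { 0x11112222, 0x33334444 };

        outw_rmw(bank, 2, 0xbeef);  /* odd halfword of word 0 */
        printf("%#x\n", bank[0]);   /* prints 0xbeef2222 */
        return 0;
    }
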
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 55e89b3838f1..a0ea84fe6519 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -310,11 +310,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
310 spin_lock_irqsave(&priv->lock, flags); 310 spin_lock_irqsave(&priv->lock, flags);
311 if (!priv->eee_active) { 311 if (!priv->eee_active) {
312 priv->eee_active = 1; 312 priv->eee_active = 1;
313 init_timer(&priv->eee_ctrl_timer); 313 setup_timer(&priv->eee_ctrl_timer,
314 priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer; 314 stmmac_eee_ctrl_timer,
315 priv->eee_ctrl_timer.data = (unsigned long)priv; 315 (unsigned long)priv);
316 priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer); 316 mod_timer(&priv->eee_ctrl_timer,
317 add_timer(&priv->eee_ctrl_timer); 317 STMMAC_LPI_T(eee_timer));
318 318
319 priv->hw->mac->set_eee_timer(priv->hw, 319 priv->hw->mac->set_eee_timer(priv->hw,
320 STMMAC_DEFAULT_LIT_LS, 320 STMMAC_DEFAULT_LIT_LS,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 4b51f903fb73..0c5842aeb807 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6989,10 +6989,10 @@ static int niu_class_to_ethflow(u64 class, int *flow_type)
6989 *flow_type = IP_USER_FLOW; 6989 *flow_type = IP_USER_FLOW;
6990 break; 6990 break;
6991 default: 6991 default:
6992 return 0; 6992 return -EINVAL;
6993 } 6993 }
6994 6994
6995 return 1; 6995 return 0;
6996} 6996}
6997 6997
6998static int niu_ethflow_to_class(int flow_type, u64 *class) 6998static int niu_ethflow_to_class(int flow_type, u64 *class)
@@ -7198,11 +7198,9 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
7198 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> 7198 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
7199 TCAM_V4KEY0_CLASS_CODE_SHIFT; 7199 TCAM_V4KEY0_CLASS_CODE_SHIFT;
7200 ret = niu_class_to_ethflow(class, &fsp->flow_type); 7200 ret = niu_class_to_ethflow(class, &fsp->flow_type);
7201
7202 if (ret < 0) { 7201 if (ret < 0) {
7203 netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", 7202 netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
7204 parent->index); 7203 parent->index);
7205 ret = -EINVAL;
7206 goto out; 7204 goto out;
7207 } 7205 }
7208 7206
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 3bc992cd70b7..f6a71092e135 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -50,7 +50,7 @@ config TI_DAVINCI_CPDMA
50 will be called davinci_cpdma. This is recommended. 50 will be called davinci_cpdma. This is recommended.
51 51
52config TI_CPSW_PHY_SEL 52config TI_CPSW_PHY_SEL
53 boolean "TI CPSW Switch Phy sel Support" 53 bool "TI CPSW Switch Phy sel Support"
54 depends on TI_CPSW 54 depends on TI_CPSW
55 ---help--- 55 ---help---
56 This driver supports configuring of the phy mode connected to 56 This driver supports configuring of the phy mode connected to
@@ -77,7 +77,7 @@ config TI_CPSW
77 will be called cpsw. 77 will be called cpsw.
78 78
79config TI_CPTS 79config TI_CPTS
80 boolean "TI Common Platform Time Sync (CPTS) Support" 80 bool "TI Common Platform Time Sync (CPTS) Support"
81 depends on TI_CPSW 81 depends on TI_CPSW
82 select PTP_1588_CLOCK 82 select PTP_1588_CLOCK
83 ---help--- 83 ---help---
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7d8dd0d2182e..a1bbaf6352ba 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1103,7 +1103,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
1103 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, 1103 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
1104 port_mask, ALE_VLAN, slave->port_vlan, 0); 1104 port_mask, ALE_VLAN, slave->port_vlan, 0);
1105 cpsw_ale_add_ucast(priv->ale, priv->mac_addr, 1105 cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
1106 priv->host_port, ALE_VLAN, slave->port_vlan); 1106 priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan);
1107} 1107}
1108 1108
1109static void soft_reset_slave(struct cpsw_slave *slave) 1109static void soft_reset_slave(struct cpsw_slave *slave)
@@ -2466,6 +2466,7 @@ static int cpsw_remove(struct platform_device *pdev)
2466 return 0; 2466 return 0;
2467} 2467}
2468 2468
2469#ifdef CONFIG_PM_SLEEP
2469static int cpsw_suspend(struct device *dev) 2470static int cpsw_suspend(struct device *dev)
2470{ 2471{
2471 struct platform_device *pdev = to_platform_device(dev); 2472 struct platform_device *pdev = to_platform_device(dev);
@@ -2518,11 +2519,9 @@ static int cpsw_resume(struct device *dev)
2518 } 2519 }
2519 return 0; 2520 return 0;
2520} 2521}
2522#endif
2521 2523
2522static const struct dev_pm_ops cpsw_pm_ops = { 2524static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
2523 .suspend = cpsw_suspend,
2524 .resume = cpsw_resume,
2525};
2526 2525
2527static const struct of_device_id cpsw_of_mtable[] = { 2526static const struct of_device_id cpsw_of_mtable[] = {
2528 { .compatible = "ti,cpsw", }, 2527 { .compatible = "ti,cpsw", },
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 98655b44b97e..c00084d689f3 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -423,6 +423,7 @@ static int davinci_mdio_remove(struct platform_device *pdev)
423 return 0; 423 return 0;
424} 424}
425 425
426#ifdef CONFIG_PM_SLEEP
426static int davinci_mdio_suspend(struct device *dev) 427static int davinci_mdio_suspend(struct device *dev)
427{ 428{
428 struct davinci_mdio_data *data = dev_get_drvdata(dev); 429 struct davinci_mdio_data *data = dev_get_drvdata(dev);
@@ -464,10 +465,10 @@ static int davinci_mdio_resume(struct device *dev)
464 465
465 return 0; 466 return 0;
466} 467}
468#endif
467 469
468static const struct dev_pm_ops davinci_mdio_pm_ops = { 470static const struct dev_pm_ops davinci_mdio_pm_ops = {
469 .suspend_late = davinci_mdio_suspend, 471 SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
470 .resume_early = davinci_mdio_resume,
471}; 472};
472 473
473#if IS_ENABLED(CONFIG_OF) 474#if IS_ENABLED(CONFIG_OF)
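
The cpsw and davinci_mdio hunks wrap the suspend/resume callbacks in CONFIG_PM_SLEEP and build the dev_pm_ops table with the stock macros. Since SET_SYSTEM_SLEEP_PM_OPS() and SET_LATE_SYSTEM_SLEEP_PM_OPS() expand to nothing when sleep support is off, the #ifdef is what prevents 'defined but not used' warnings. A kernel-style sketch with hypothetical callback bodies:

    #include <linux/pm.h>
    #include <linux/device.h>

    #ifdef CONFIG_PM_SLEEP
    static int my_suspend(struct device *dev) { return 0; }
    static int my_resume(struct device *dev)  { return 0; }
    #endif

    /* expands to a const struct dev_pm_ops whose sleep callbacks
     * are populated only when CONFIG_PM_SLEEP is enabled */
    static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

    /* for late-suspend/early-resume ordering, fill the table by hand */
    static const struct dev_pm_ops my_late_pm_ops = {
        SET_LATE_SYSTEM_SLEEP_PM_OPS(my_suspend, my_resume)
    };
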
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 44ff8d7c64a5..5138407941cf 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -938,7 +938,7 @@ static void eth_set_mcast_list(struct net_device *dev)
938 int i; 938 int i;
939 static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; 939 static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
940 940
941 if (dev->flags & IFF_ALLMULTI) { 941 if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
942 for (i = 0; i < ETH_ALEN; i++) { 942 for (i = 0; i < ETH_ALEN; i++) {
943 __raw_writel(allmulti[i], &port->regs->mcast_addr[i]); 943 __raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
944 __raw_writel(allmulti[i], &port->regs->mcast_mask[i]); 944 __raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 1e51c6bf3ae1..8362aef0c15e 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -654,11 +654,14 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
654 } /* else everything is zero */ 654 } /* else everything is zero */
655} 655}
656 656
657/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
658#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
659
657/* Get packet from user space buffer */ 660/* Get packet from user space buffer */
658static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, 661static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
659 struct iov_iter *from, int noblock) 662 struct iov_iter *from, int noblock)
660{ 663{
661 int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN); 664 int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
662 struct sk_buff *skb; 665 struct sk_buff *skb;
663 struct macvlan_dev *vlan; 666 struct macvlan_dev *vlan;
664 unsigned long total_len = iov_iter_count(from); 667 unsigned long total_len = iov_iter_count(from);
@@ -722,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
722 linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len); 725 linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
723 } 726 }
724 727
725 skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen, 728 skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
726 linear, noblock, &err); 729 linear, noblock, &err);
727 if (!skb) 730 if (!skb)
728 goto err; 731 goto err;
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index 9e3af54c9010..32efbd48f326 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -92,6 +92,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
92#define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate" 92#define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
93#define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew" 93#define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
94#define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp" 94#define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp"
95#define XGBE_PHY_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
96#define XGBE_PHY_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
95 97
96#define XGBE_PHY_SPEEDS 3 98#define XGBE_PHY_SPEEDS 3
97#define XGBE_PHY_SPEED_1000 0 99#define XGBE_PHY_SPEED_1000 0
@@ -177,10 +179,12 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
177#define SPEED_10000_BLWC 0 179#define SPEED_10000_BLWC 0
178#define SPEED_10000_CDR 0x7 180#define SPEED_10000_CDR 0x7
179#define SPEED_10000_PLL 0x1 181#define SPEED_10000_PLL 0x1
180#define SPEED_10000_PQ 0x1e 182#define SPEED_10000_PQ 0x12
181#define SPEED_10000_RATE 0x0 183#define SPEED_10000_RATE 0x0
182#define SPEED_10000_TXAMP 0xa 184#define SPEED_10000_TXAMP 0xa
183#define SPEED_10000_WORD 0x7 185#define SPEED_10000_WORD 0x7
186#define SPEED_10000_DFE_TAP_CONFIG 0x1
187#define SPEED_10000_DFE_TAP_ENABLE 0x7f
184 188
185#define SPEED_2500_BLWC 1 189#define SPEED_2500_BLWC 1
186#define SPEED_2500_CDR 0x2 190#define SPEED_2500_CDR 0x2
@@ -189,6 +193,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
189#define SPEED_2500_RATE 0x1 193#define SPEED_2500_RATE 0x1
190#define SPEED_2500_TXAMP 0xf 194#define SPEED_2500_TXAMP 0xf
191#define SPEED_2500_WORD 0x1 195#define SPEED_2500_WORD 0x1
196#define SPEED_2500_DFE_TAP_CONFIG 0x3
197#define SPEED_2500_DFE_TAP_ENABLE 0x0
192 198
193#define SPEED_1000_BLWC 1 199#define SPEED_1000_BLWC 1
194#define SPEED_1000_CDR 0x2 200#define SPEED_1000_CDR 0x2
@@ -197,16 +203,25 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
197#define SPEED_1000_RATE 0x3 203#define SPEED_1000_RATE 0x3
198#define SPEED_1000_TXAMP 0xf 204#define SPEED_1000_TXAMP 0xf
199#define SPEED_1000_WORD 0x1 205#define SPEED_1000_WORD 0x1
206#define SPEED_1000_DFE_TAP_CONFIG 0x3
207#define SPEED_1000_DFE_TAP_ENABLE 0x0
200 208
201/* SerDes RxTx register offsets */ 209/* SerDes RxTx register offsets */
210#define RXTX_REG6 0x0018
202#define RXTX_REG20 0x0050 211#define RXTX_REG20 0x0050
212#define RXTX_REG22 0x0058
203#define RXTX_REG114 0x01c8 213#define RXTX_REG114 0x01c8
214#define RXTX_REG129 0x0204
204 215
205/* SerDes RxTx register entry bit positions and sizes */ 216/* SerDes RxTx register entry bit positions and sizes */
217#define RXTX_REG6_RESETB_RXD_INDEX 8
218#define RXTX_REG6_RESETB_RXD_WIDTH 1
206#define RXTX_REG20_BLWC_ENA_INDEX 2 219#define RXTX_REG20_BLWC_ENA_INDEX 2
207#define RXTX_REG20_BLWC_ENA_WIDTH 1 220#define RXTX_REG20_BLWC_ENA_WIDTH 1
208#define RXTX_REG114_PQ_REG_INDEX 9 221#define RXTX_REG114_PQ_REG_INDEX 9
209#define RXTX_REG114_PQ_REG_WIDTH 7 222#define RXTX_REG114_PQ_REG_WIDTH 7
223#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
224#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
210 225
211/* Bit setting and getting macros 226/* Bit setting and getting macros
212 * The get macro will extract the current bit field value from within 227 * The get macro will extract the current bit field value from within
@@ -333,6 +348,18 @@ static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
333 SPEED_10000_TXAMP, 348 SPEED_10000_TXAMP,
334}; 349};
335 350
351static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
352 SPEED_1000_DFE_TAP_CONFIG,
353 SPEED_2500_DFE_TAP_CONFIG,
354 SPEED_10000_DFE_TAP_CONFIG,
355};
356
357static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
358 SPEED_1000_DFE_TAP_ENABLE,
359 SPEED_2500_DFE_TAP_ENABLE,
360 SPEED_10000_DFE_TAP_ENABLE,
361};
362
336enum amd_xgbe_phy_an { 363enum amd_xgbe_phy_an {
337 AMD_XGBE_AN_READY = 0, 364 AMD_XGBE_AN_READY = 0,
338 AMD_XGBE_AN_PAGE_RECEIVED, 365 AMD_XGBE_AN_PAGE_RECEIVED,
@@ -393,6 +420,8 @@ struct amd_xgbe_phy_priv {
393 u32 serdes_cdr_rate[XGBE_PHY_SPEEDS]; 420 u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
394 u32 serdes_pq_skew[XGBE_PHY_SPEEDS]; 421 u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
395 u32 serdes_tx_amp[XGBE_PHY_SPEEDS]; 422 u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
423 u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
424 u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];
396 425
397 /* Auto-negotiation state machine support */ 426 /* Auto-negotiation state machine support */
398 struct mutex an_mutex; 427 struct mutex an_mutex;
@@ -481,11 +510,16 @@ static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
481 status = XSIR0_IOREAD(priv, SIR0_STATUS); 510 status = XSIR0_IOREAD(priv, SIR0_STATUS);
482 if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) && 511 if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
483 XSIR_GET_BITS(status, SIR0_STATUS, TX_READY)) 512 XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
484 return; 513 goto rx_reset;
485 } 514 }
486 515
487 netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n", 516 netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
488 status); 517 status);
518
519rx_reset:
520 /* Perform Rx reset for the DFE changes */
521 XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
522 XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
489} 523}
490 524
491static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev) 525static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
@@ -534,6 +568,10 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
534 priv->serdes_blwc[XGBE_PHY_SPEED_10000]); 568 priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
535 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, 569 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
536 priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]); 570 priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
571 XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
572 priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
573 XRXTX_IOWRITE(priv, RXTX_REG22,
574 priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);
537 575
538 amd_xgbe_phy_serdes_complete_ratechange(phydev); 576 amd_xgbe_phy_serdes_complete_ratechange(phydev);
539 577
@@ -586,6 +624,10 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
586 priv->serdes_blwc[XGBE_PHY_SPEED_2500]); 624 priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
587 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, 625 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
588 priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]); 626 priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
627 XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
628 priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
629 XRXTX_IOWRITE(priv, RXTX_REG22,
630 priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);
589 631
590 amd_xgbe_phy_serdes_complete_ratechange(phydev); 632 amd_xgbe_phy_serdes_complete_ratechange(phydev);
591 633
@@ -638,6 +680,10 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
638 priv->serdes_blwc[XGBE_PHY_SPEED_1000]); 680 priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
639 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, 681 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
640 priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]); 682 priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
683 XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
684 priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
685 XRXTX_IOWRITE(priv, RXTX_REG22,
686 priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);
641 687
642 amd_xgbe_phy_serdes_complete_ratechange(phydev); 688 amd_xgbe_phy_serdes_complete_ratechange(phydev);
643 689
@@ -1668,6 +1714,38 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
1668 sizeof(priv->serdes_tx_amp)); 1714 sizeof(priv->serdes_tx_amp));
1669 } 1715 }
1670 1716
1717 if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
1718 ret = device_property_read_u32_array(phy_dev,
1719 XGBE_PHY_DFE_CFG_PROPERTY,
1720 priv->serdes_dfe_tap_cfg,
1721 XGBE_PHY_SPEEDS);
1722 if (ret) {
1723 dev_err(dev, "invalid %s property\n",
1724 XGBE_PHY_DFE_CFG_PROPERTY);
1725 goto err_sir1;
1726 }
1727 } else {
1728 memcpy(priv->serdes_dfe_tap_cfg,
1729 amd_xgbe_phy_serdes_dfe_tap_cfg,
1730 sizeof(priv->serdes_dfe_tap_cfg));
1731 }
1732
1733 if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
1734 ret = device_property_read_u32_array(phy_dev,
1735 XGBE_PHY_DFE_ENA_PROPERTY,
1736 priv->serdes_dfe_tap_ena,
1737 XGBE_PHY_SPEEDS);
1738 if (ret) {
1739 dev_err(dev, "invalid %s property\n",
1740 XGBE_PHY_DFE_ENA_PROPERTY);
1741 goto err_sir1;
1742 }
1743 } else {
1744 memcpy(priv->serdes_dfe_tap_ena,
1745 amd_xgbe_phy_serdes_dfe_tap_ena,
1746 sizeof(priv->serdes_dfe_tap_ena));
1747 }
1748
1671 phydev->priv = priv; 1749 phydev->priv = priv;
1672 1750
1673 if (!priv->adev || acpi_disabled) 1751 if (!priv->adev || acpi_disabled)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index cdcac6aa4260..52cd8db2c57d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -236,6 +236,25 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
236} 236}
237 237
238/** 238/**
239 * phy_check_valid - check if there is a valid PHY setting which matches
240 * speed, duplex, and feature mask
241 * @speed: speed to match
242 * @duplex: duplex to match
243 * @features: A mask of the valid settings
244 *
245 * Description: Returns true if there is a valid setting, false otherwise.
246 */
247static inline bool phy_check_valid(int speed, int duplex, u32 features)
248{
249 unsigned int idx;
250
251 idx = phy_find_valid(phy_find_setting(speed, duplex), features);
252
253 return settings[idx].speed == speed && settings[idx].duplex == duplex &&
254 (settings[idx].setting & features);
255}
256
257/**
239 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex 258 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
240 * @phydev: the target phy_device struct 259 * @phydev: the target phy_device struct
241 * 260 *
@@ -1045,7 +1064,6 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1045 int eee_lp, eee_cap, eee_adv; 1064 int eee_lp, eee_cap, eee_adv;
1046 u32 lp, cap, adv; 1065 u32 lp, cap, adv;
1047 int status; 1066 int status;
1048 unsigned int idx;
1049 1067
1050 /* Read phy status to properly get the right settings */ 1068 /* Read phy status to properly get the right settings */
1051 status = phy_read_status(phydev); 1069 status = phy_read_status(phydev);
@@ -1077,8 +1095,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1077 1095
1078 adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); 1096 adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
1079 lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); 1097 lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
1080 idx = phy_find_setting(phydev->speed, phydev->duplex); 1098 if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
1081 if (!(lp & adv & settings[idx].setting))
1082 goto eee_exit_err; 1099 goto eee_exit_err;
1083 1100
1084 if (clk_stop_enable) { 1101 if (clk_stop_enable) {
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a7d163bf5bbb..9d3366f7c9ad 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -43,9 +43,7 @@
43 43
44static struct team_port *team_port_get_rcu(const struct net_device *dev) 44static struct team_port *team_port_get_rcu(const struct net_device *dev)
45{ 45{
46 struct team_port *port = rcu_dereference(dev->rx_handler_data); 46 return rcu_dereference(dev->rx_handler_data);
47
48 return team_port_exists(dev) ? port : NULL;
49} 47}
50 48
51static struct team_port *team_port_get_rtnl(const struct net_device *dev) 49static struct team_port *team_port_get_rtnl(const struct net_device *dev)
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 37eed4d84e9c..7ba8d0885f12 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -161,6 +161,7 @@ config USB_NET_AX8817X
161 * Linksys USB200M 161 * Linksys USB200M
162 * Netgear FA120 162 * Netgear FA120
163 * Sitecom LN-029 163 * Sitecom LN-029
164 * Sitecom LN-028
164 * Intellinet USB 2.0 Ethernet 165 * Intellinet USB 2.0 Ethernet
165 * ST Lab USB 2.0 Ethernet 166 * ST Lab USB 2.0 Ethernet
166 * TrendNet TU2-ET100 167 * TrendNet TU2-ET100
@@ -397,14 +398,14 @@ config USB_NET_CDC_SUBSET
397 not generally have permanently assigned Ethernet addresses. 398 not generally have permanently assigned Ethernet addresses.
398 399
399config USB_ALI_M5632 400config USB_ALI_M5632
400 boolean "ALi M5632 based 'USB 2.0 Data Link' cables" 401 bool "ALi M5632 based 'USB 2.0 Data Link' cables"
401 depends on USB_NET_CDC_SUBSET 402 depends on USB_NET_CDC_SUBSET
402 help 403 help
403 Choose this option if you're using a host-to-host cable 404 Choose this option if you're using a host-to-host cable
404 based on this design, which supports USB 2.0 high speed. 405 based on this design, which supports USB 2.0 high speed.
405 406
406config USB_AN2720 407config USB_AN2720
407 boolean "AnchorChips 2720 based cables (Xircom PGUNET, ...)" 408 bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)"
408 depends on USB_NET_CDC_SUBSET 409 depends on USB_NET_CDC_SUBSET
409 help 410 help
410 Choose this option if you're using a host-to-host cable 411 Choose this option if you're using a host-to-host cable
@@ -412,7 +413,7 @@ config USB_AN2720
412 Cypress brand. 413 Cypress brand.
413 414
414config USB_BELKIN 415config USB_BELKIN
415 boolean "eTEK based host-to-host cables (Advance, Belkin, ...)" 416 bool "eTEK based host-to-host cables (Advance, Belkin, ...)"
416 depends on USB_NET_CDC_SUBSET 417 depends on USB_NET_CDC_SUBSET
417 default y 418 default y
418 help 419 help
@@ -421,7 +422,7 @@ config USB_BELKIN
421 microcontroller, with LEDs that indicate traffic. 422 microcontroller, with LEDs that indicate traffic.
422 423
423config USB_ARMLINUX 424config USB_ARMLINUX
424 boolean "Embedded ARM Linux links (iPaq, ...)" 425 bool "Embedded ARM Linux links (iPaq, ...)"
425 depends on USB_NET_CDC_SUBSET 426 depends on USB_NET_CDC_SUBSET
426 default y 427 default y
427 help 428 help
@@ -438,14 +439,14 @@ config USB_ARMLINUX
438 this simpler protocol by installing a different kernel. 439 this simpler protocol by installing a different kernel.
439 440
440config USB_EPSON2888 441config USB_EPSON2888
441 boolean "Epson 2888 based firmware (DEVELOPMENT)" 442 bool "Epson 2888 based firmware (DEVELOPMENT)"
442 depends on USB_NET_CDC_SUBSET 443 depends on USB_NET_CDC_SUBSET
443 help 444 help
444 Choose this option to support the usb networking links used 445 Choose this option to support the usb networking links used
445 by some sample firmware from Epson. 446 by some sample firmware from Epson.
446 447
447config USB_KC2190 448config USB_KC2190
448 boolean "KT Technology KC2190 based cables (InstaNet)" 449 bool "KT Technology KC2190 based cables (InstaNet)"
449 depends on USB_NET_CDC_SUBSET 450 depends on USB_NET_CDC_SUBSET
450 help 451 help
451 Choose this option if you're using a host-to-host cable 452 Choose this option if you're using a host-to-host cable
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index bf49792062a2..1173a24feda3 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -979,6 +979,10 @@ static const struct usb_device_id products [] = {
979 USB_DEVICE (0x0df6, 0x0056), 979 USB_DEVICE (0x0df6, 0x0056),
980 .driver_info = (unsigned long) &ax88178_info, 980 .driver_info = (unsigned long) &ax88178_info,
981}, { 981}, {
982 // Sitecom LN-028 "USB 2.0 10/100/1000 Ethernet adapter"
983 USB_DEVICE (0x0df6, 0x061c),
984 .driver_info = (unsigned long) &ax88178_info,
985}, {
982 // corega FEther USB2-TX 986 // corega FEther USB2-TX
983 USB_DEVICE (0x07aa, 0x0017), 987 USB_DEVICE (0x07aa, 0x0017),
984 .driver_info = (unsigned long) &ax8817x_info, 988 .driver_info = (unsigned long) &ax8817x_info,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 3c8dfe5e46ed..111d907e0c11 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1597,7 +1597,7 @@ hso_wait_modem_status(struct hso_serial *serial, unsigned long arg)
1597 } 1597 }
1598 cprev = cnow; 1598 cprev = cnow;
1599 } 1599 }
1600 current->state = TASK_RUNNING; 1600 __set_current_state(TASK_RUNNING);
1601 remove_wait_queue(&tiocmget->waitq, &wait); 1601 remove_wait_queue(&tiocmget->waitq, &wait);
1602 1602
1603 return ret; 1603 return ret;
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 3d18bb0eee85..1bfe0fcaccf5 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -134,6 +134,11 @@ static const struct usb_device_id products [] = {
134}, { 134}, {
135 USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */ 135 USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */
136 .driver_info = (unsigned long) &prolific_info, 136 .driver_info = (unsigned long) &prolific_info,
137}, {
138 USB_DEVICE(0x3923, 0x7825), /* National Instruments USB
139 * Host-to-Host Cable
140 */
141 .driver_info = (unsigned long) &prolific_info,
137}, 142},
138 143
139 { }, // END 144 { }, // END
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 110a2cf67244..f1ff3666f090 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1710,6 +1710,12 @@ static int virtnet_probe(struct virtio_device *vdev)
1710 struct virtnet_info *vi; 1710 struct virtnet_info *vi;
1711 u16 max_queue_pairs; 1711 u16 max_queue_pairs;
1712 1712
1713 if (!vdev->config->get) {
1714 dev_err(&vdev->dev, "%s failure: config access disabled\n",
1715 __func__);
1716 return -EINVAL;
1717 }
1718
1713 if (!virtnet_validate_features(vdev)) 1719 if (!virtnet_validate_features(vdev))
1714 return -EINVAL; 1720 return -EINVAL;
1715 1721
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 83c39e2858bf..88d121d43c08 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -806,21 +806,21 @@ static ssize_t cosa_read(struct file *file,
806 spin_lock_irqsave(&cosa->lock, flags); 806 spin_lock_irqsave(&cosa->lock, flags);
807 add_wait_queue(&chan->rxwaitq, &wait); 807 add_wait_queue(&chan->rxwaitq, &wait);
808 while (!chan->rx_status) { 808 while (!chan->rx_status) {
809 current->state = TASK_INTERRUPTIBLE; 809 set_current_state(TASK_INTERRUPTIBLE);
810 spin_unlock_irqrestore(&cosa->lock, flags); 810 spin_unlock_irqrestore(&cosa->lock, flags);
811 schedule(); 811 schedule();
812 spin_lock_irqsave(&cosa->lock, flags); 812 spin_lock_irqsave(&cosa->lock, flags);
813 if (signal_pending(current) && chan->rx_status == 0) { 813 if (signal_pending(current) && chan->rx_status == 0) {
814 chan->rx_status = 1; 814 chan->rx_status = 1;
815 remove_wait_queue(&chan->rxwaitq, &wait); 815 remove_wait_queue(&chan->rxwaitq, &wait);
816 current->state = TASK_RUNNING; 816 __set_current_state(TASK_RUNNING);
817 spin_unlock_irqrestore(&cosa->lock, flags); 817 spin_unlock_irqrestore(&cosa->lock, flags);
818 mutex_unlock(&chan->rlock); 818 mutex_unlock(&chan->rlock);
819 return -ERESTARTSYS; 819 return -ERESTARTSYS;
820 } 820 }
821 } 821 }
822 remove_wait_queue(&chan->rxwaitq, &wait); 822 remove_wait_queue(&chan->rxwaitq, &wait);
823 current->state = TASK_RUNNING; 823 __set_current_state(TASK_RUNNING);
824 kbuf = chan->rxdata; 824 kbuf = chan->rxdata;
825 count = chan->rxsize; 825 count = chan->rxsize;
826 spin_unlock_irqrestore(&cosa->lock, flags); 826 spin_unlock_irqrestore(&cosa->lock, flags);
@@ -890,14 +890,14 @@ static ssize_t cosa_write(struct file *file,
890 spin_lock_irqsave(&cosa->lock, flags); 890 spin_lock_irqsave(&cosa->lock, flags);
891 add_wait_queue(&chan->txwaitq, &wait); 891 add_wait_queue(&chan->txwaitq, &wait);
892 while (!chan->tx_status) { 892 while (!chan->tx_status) {
893 current->state = TASK_INTERRUPTIBLE; 893 set_current_state(TASK_INTERRUPTIBLE);
894 spin_unlock_irqrestore(&cosa->lock, flags); 894 spin_unlock_irqrestore(&cosa->lock, flags);
895 schedule(); 895 schedule();
896 spin_lock_irqsave(&cosa->lock, flags); 896 spin_lock_irqsave(&cosa->lock, flags);
897 if (signal_pending(current) && chan->tx_status == 0) { 897 if (signal_pending(current) && chan->tx_status == 0) {
898 chan->tx_status = 1; 898 chan->tx_status = 1;
899 remove_wait_queue(&chan->txwaitq, &wait); 899 remove_wait_queue(&chan->txwaitq, &wait);
900 current->state = TASK_RUNNING; 900 __set_current_state(TASK_RUNNING);
901 chan->tx_status = 1; 901 chan->tx_status = 1;
902 spin_unlock_irqrestore(&cosa->lock, flags); 902 spin_unlock_irqrestore(&cosa->lock, flags);
903 up(&chan->wsem); 903 up(&chan->wsem);
@@ -905,7 +905,7 @@ static ssize_t cosa_write(struct file *file,
905 } 905 }
906 } 906 }
907 remove_wait_queue(&chan->txwaitq, &wait); 907 remove_wait_queue(&chan->txwaitq, &wait);
908 current->state = TASK_RUNNING; 908 __set_current_state(TASK_RUNNING);
909 up(&chan->wsem); 909 up(&chan->wsem);
910 spin_unlock_irqrestore(&cosa->lock, flags); 910 spin_unlock_irqrestore(&cosa->lock, flags);
911 kfree(kbuf); 911 kfree(kbuf);
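
Both the hso.c hunk earlier and this cosa.c one replace direct stores to current->state with the helpers: set_current_state() inserts a memory barrier so the new task state is visible before the subsequent condition test (otherwise a wakeup racing between the test and schedule() can be lost), while __set_current_state() skips the barrier and is fine for the final TASK_RUNNING store, where nothing depends on the ordering. A minimal sketch of the open-coded sleep/wake idiom these drivers use, with hypothetical names (my_waitq, done):

    #include <linux/errno.h>
    #include <linux/sched.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_waitq);
    static int done;

    static int wait_for_done(void)
    {
            DECLARE_WAITQUEUE(wait, current);
            int ret = 0;

            add_wait_queue(&my_waitq, &wait);
            for (;;) {
                    /* Barrier included: the state is written before
                     * 'done' is read, so a concurrent wake_up() cannot
                     * slip in between the test and schedule().
                     */
                    set_current_state(TASK_INTERRUPTIBLE);
                    if (done)
                            break;
                    if (signal_pending(current)) {
                            ret = -ERESTARTSYS;
                            break;
                    }
                    schedule();
            }
            /* No ordering is needed when going back to TASK_RUNNING. */
            __set_current_state(TASK_RUNNING);
            remove_wait_queue(&my_waitq, &wait);
            return ret;
    }
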
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index c1947c5915eb..d56b7859a437 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -946,7 +946,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
946 goto nla_put_failure; 946 goto nla_put_failure;
947 947
948 genlmsg_end(skb, msg_head); 948 genlmsg_end(skb, msg_head);
949 genlmsg_unicast(&init_net, skb, dst_portid); 949 if (genlmsg_unicast(&init_net, skb, dst_portid))
950 goto err_free_txskb;
950 951
951 /* Enqueue the packet */ 952 /* Enqueue the packet */
952 skb_queue_tail(&data->pending, my_skb); 953 skb_queue_tail(&data->pending, my_skb);
@@ -955,6 +956,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
955 return; 956 return;
956 957
957nla_put_failure: 958nla_put_failure:
959 nlmsg_free(skb);
960err_free_txskb:
958 printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__); 961 printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
959 ieee80211_free_txskb(hw, my_skb); 962 ieee80211_free_txskb(hw, my_skb);
960 data->tx_failed++; 963 data->tx_failed++;
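
The hwsim hunk above starts checking genlmsg_unicast() and splits the error exit into two labels: genlmsg_unicast() consumes the netlink skb even on failure, so only paths that still own it (the nla_put_failure cases) may call nlmsg_free(), while every failure path must release the 802.11 tx skb. A sketch of that layered unwind with a hypothetical generic-netlink family, command, and attribute:

    #include <linux/skbuff.h>
    #include <net/genetlink.h>
    #include <net/net_namespace.h>

    /* Hypothetical family/command/attribute, for illustration only. */
    static struct genl_family example_family = {
            .id = GENL_ID_GENERATE,
            .name = "example",
            .version = 1,
            .maxattr = 1,
    };
    #define EXAMPLE_CMD_REPORT 1
    #define EXAMPLE_ATTR_COOKIE 1

    static void send_report(struct sk_buff *txskb, u32 dst_portid)
    {
            struct sk_buff *msg;
            void *hdr;

            msg = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
            if (!msg)
                    goto err_free_txskb;

            hdr = genlmsg_put(msg, 0, 0, &example_family, 0,
                              EXAMPLE_CMD_REPORT);
            if (!hdr)
                    goto nla_put_failure;

            if (nla_put_u32(msg, EXAMPLE_ATTR_COOKIE, 42))
                    goto nla_put_failure;

            genlmsg_end(msg, hdr);
            /* On failure, msg has already been freed for us. */
            if (genlmsg_unicast(&init_net, msg, dst_portid))
                    goto err_free_txskb;
            return;

    nla_put_failure:
            nlmsg_free(msg);        /* we still own msg on this path */
    err_free_txskb:
            kfree_skb(txskb);
    }
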
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 006b8bcb2e31..2b4ef256c6b9 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -243,14 +243,14 @@ config RT2X00_LIB
243 select AVERAGE 243 select AVERAGE
244 244
245config RT2X00_LIB_FIRMWARE 245config RT2X00_LIB_FIRMWARE
246 boolean 246 bool
247 select FW_LOADER 247 select FW_LOADER
248 248
249config RT2X00_LIB_CRYPTO 249config RT2X00_LIB_CRYPTO
250 boolean 250 bool
251 251
252config RT2X00_LIB_LEDS 252config RT2X00_LIB_LEDS
253 boolean 253 bool
254 default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n) 254 default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n)
255 255
256config RT2X00_LIB_DEBUGFS 256config RT2X00_LIB_DEBUGFS
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f7a31d2cb3f1..c4d68d768408 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -655,9 +655,15 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
655 unsigned long flags; 655 unsigned long flags;
656 656
657 do { 657 do {
658 int notify;
659
658 spin_lock_irqsave(&queue->response_lock, flags); 660 spin_lock_irqsave(&queue->response_lock, flags);
659 make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); 661 make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
662 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
660 spin_unlock_irqrestore(&queue->response_lock, flags); 663 spin_unlock_irqrestore(&queue->response_lock, flags);
664 if (notify)
665 notify_remote_via_irq(queue->tx_irq);
666
661 if (cons == end) 667 if (cons == end)
662 break; 668 break;
663 txp = RING_GET_REQUEST(&queue->tx, cons++); 669 txp = RING_GET_REQUEST(&queue->tx, cons++);
@@ -1649,17 +1655,28 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1649{ 1655{
1650 struct pending_tx_info *pending_tx_info; 1656 struct pending_tx_info *pending_tx_info;
1651 pending_ring_idx_t index; 1657 pending_ring_idx_t index;
1658 int notify;
1652 unsigned long flags; 1659 unsigned long flags;
1653 1660
1654 pending_tx_info = &queue->pending_tx_info[pending_idx]; 1661 pending_tx_info = &queue->pending_tx_info[pending_idx];
1662
1655 spin_lock_irqsave(&queue->response_lock, flags); 1663 spin_lock_irqsave(&queue->response_lock, flags);
1664
1656 make_tx_response(queue, &pending_tx_info->req, status); 1665 make_tx_response(queue, &pending_tx_info->req, status);
1657 index = pending_index(queue->pending_prod); 1666
1667 /* Release the pending index before pushing the Tx response so
1668 * it's available before a new Tx request is pushed by the
1669 * frontend.
1670 */
1671 index = pending_index(queue->pending_prod++);
1658 queue->pending_ring[index] = pending_idx; 1672 queue->pending_ring[index] = pending_idx;
1659 /* TX shouldn't use the index before we give it back here */ 1673
1660 mb(); 1674 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1661 queue->pending_prod++; 1675
1662 spin_unlock_irqrestore(&queue->response_lock, flags); 1676 spin_unlock_irqrestore(&queue->response_lock, flags);
1677
1678 if (notify)
1679 notify_remote_via_irq(queue->tx_irq);
1663} 1680}
1664 1681
1665 1682
@@ -1669,7 +1686,6 @@ static void make_tx_response(struct xenvif_queue *queue,
1669{ 1686{
1670 RING_IDX i = queue->tx.rsp_prod_pvt; 1687 RING_IDX i = queue->tx.rsp_prod_pvt;
1671 struct xen_netif_tx_response *resp; 1688 struct xen_netif_tx_response *resp;
1672 int notify;
1673 1689
1674 resp = RING_GET_RESPONSE(&queue->tx, i); 1690 resp = RING_GET_RESPONSE(&queue->tx, i);
1675 resp->id = txp->id; 1691 resp->id = txp->id;
@@ -1679,9 +1695,6 @@ static void make_tx_response(struct xenvif_queue *queue,
1679 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; 1695 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1680 1696
1681 queue->tx.rsp_prod_pvt = ++i; 1697 queue->tx.rsp_prod_pvt = ++i;
1682 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1683 if (notify)
1684 notify_remote_via_irq(queue->tx_irq);
1685} 1698}
1686 1699
1687static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, 1700static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
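
Both netback hunks above follow the same shape: produce the response and run RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() while still holding response_lock, so the producer index and the notify decision stay consistent with any concurrent response writers, then drop the lock before raising the comparatively slow event-channel interrupt. The generic pattern, with hypothetical ring and notify helpers standing in for the Xen ring macros:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct resp_ring {
            spinlock_t lock;
            unsigned int prod;
            /* ...shared indices the peer also reads... */
    };

    static bool ring_push_and_check_notify(struct resp_ring *r)
    {
            /* Stand-in for RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(). */
            return true;
    }

    static void ring_notify_peer(struct resp_ring *r)
    {
            /* Stand-in for notify_remote_via_irq(). */
    }

    static void push_one_response(struct resp_ring *r)
    {
            unsigned long flags;
            bool notify;

            spin_lock_irqsave(&r->lock, flags);
            r->prod++;                              /* publish the response */
            /* Decide while the indices are stable under the lock. */
            notify = ring_push_and_check_notify(r);
            spin_unlock_irqrestore(&r->lock, flags);

            if (notify)                             /* slow path outside the lock */
                    ring_notify_peer(r);
    }
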
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 110fece2ff53..62426d81a4d6 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -229,7 +229,6 @@ parse_failed:
229 resource_list_for_each_entry(window, resources) 229 resource_list_for_each_entry(window, resources)
230 kfree(window->res); 230 kfree(window->res);
231 pci_free_resource_list(resources); 231 pci_free_resource_list(resources);
232 kfree(bus_range);
233 return err; 232 return err;
234} 233}
235EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources); 234EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
diff --git a/drivers/pci/pcie/aer/Kconfig b/drivers/pci/pcie/aer/Kconfig
index 389440228c1d..7d1437b01fdd 100644
--- a/drivers/pci/pcie/aer/Kconfig
+++ b/drivers/pci/pcie/aer/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5config PCIEAER 5config PCIEAER
6 boolean "Root Port Advanced Error Reporting support" 6 bool "Root Port Advanced Error Reporting support"
7 depends on PCIEPORTBUS 7 depends on PCIEPORTBUS
8 select RAS 8 select RAS
9 default y 9 default y
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 638e797037da..97527614141b 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -735,6 +735,31 @@ config INTEL_IPS
735 functionality. If in doubt, say Y here; it will only load on 735 functionality. If in doubt, say Y here; it will only load on
736 supported platforms. 736 supported platforms.
737 737
738config INTEL_IMR
739 bool "Intel Isolated Memory Region support"
740 default n
741 depends on X86_INTEL_QUARK && IOSF_MBI
742 ---help---
743 This option provides a means to manipulate Isolated Memory Regions.
744 IMRs are a set of registers that define read and write access masks
745 to prohibit certain system agents from accessing memory with 1 KiB
746 granularity.
747
748 IMRs make it possible to control read/write access to an address
749 by hardware agents inside the SoC. Read and write masks can be
750 defined for:
751 - eSRAM flush
752 - Dirty CPU snoop (write only)
753 - RMU access
754 - PCI Virtual Channel 0/Virtual Channel 1
755 - SMM mode
756 - Non SMM mode
757
758 Quark contains a set of eight IMR registers and makes use of those
759 registers during its bootup process.
760
761 If you are running on a Galileo/Quark say Y here.
762
738config IBM_RTL 763config IBM_RTL
739 tristate "Device driver to enable PRTL support" 764 tristate "Device driver to enable PRTL support"
740 depends on X86 && PCI 765 depends on X86 && PCI
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index f71700e0d132..46b274693872 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -856,8 +856,8 @@ static void asus_backlight_exit(struct asus_laptop *asus)
856 * than count bytes. We set eof to 1 if we handle those 2 values. We return the 856 * than count bytes. We set eof to 1 if we handle those 2 values. We return the
857 * number of bytes written in page 857 * number of bytes written in page
858 */ 858 */
859static ssize_t show_infos(struct device *dev, 859static ssize_t infos_show(struct device *dev, struct device_attribute *attr,
860 struct device_attribute *attr, char *page) 860 char *page)
861{ 861{
862 struct asus_laptop *asus = dev_get_drvdata(dev); 862 struct asus_laptop *asus = dev_get_drvdata(dev);
863 int len = 0; 863 int len = 0;
@@ -926,6 +926,7 @@ static ssize_t show_infos(struct device *dev,
926 926
927 return len; 927 return len;
928} 928}
929static DEVICE_ATTR_RO(infos);
929 930
930static int parse_arg(const char *buf, unsigned long count, int *val) 931static int parse_arg(const char *buf, unsigned long count, int *val)
931{ 932{
@@ -957,15 +958,15 @@ static ssize_t sysfs_acpi_set(struct asus_laptop *asus,
957/* 958/*
958 * LEDD display 959 * LEDD display
959 */ 960 */
960static ssize_t show_ledd(struct device *dev, 961static ssize_t ledd_show(struct device *dev, struct device_attribute *attr,
961 struct device_attribute *attr, char *buf) 962 char *buf)
962{ 963{
963 struct asus_laptop *asus = dev_get_drvdata(dev); 964 struct asus_laptop *asus = dev_get_drvdata(dev);
964 965
965 return sprintf(buf, "0x%08x\n", asus->ledd_status); 966 return sprintf(buf, "0x%08x\n", asus->ledd_status);
966} 967}
967 968
968static ssize_t store_ledd(struct device *dev, struct device_attribute *attr, 969static ssize_t ledd_store(struct device *dev, struct device_attribute *attr,
969 const char *buf, size_t count) 970 const char *buf, size_t count)
970{ 971{
971 struct asus_laptop *asus = dev_get_drvdata(dev); 972 struct asus_laptop *asus = dev_get_drvdata(dev);
@@ -981,6 +982,7 @@ static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
981 } 982 }
982 return rv; 983 return rv;
983} 984}
985static DEVICE_ATTR_RW(ledd);
984 986
985/* 987/*
986 * Wireless 988 * Wireless
@@ -1014,21 +1016,22 @@ static int asus_wlan_set(struct asus_laptop *asus, int status)
1014 return 0; 1016 return 0;
1015} 1017}
1016 1018
1017static ssize_t show_wlan(struct device *dev, 1019static ssize_t wlan_show(struct device *dev, struct device_attribute *attr,
1018 struct device_attribute *attr, char *buf) 1020 char *buf)
1019{ 1021{
1020 struct asus_laptop *asus = dev_get_drvdata(dev); 1022 struct asus_laptop *asus = dev_get_drvdata(dev);
1021 1023
1022 return sprintf(buf, "%d\n", asus_wireless_status(asus, WL_RSTS)); 1024 return sprintf(buf, "%d\n", asus_wireless_status(asus, WL_RSTS));
1023} 1025}
1024 1026
1025static ssize_t store_wlan(struct device *dev, struct device_attribute *attr, 1027static ssize_t wlan_store(struct device *dev, struct device_attribute *attr,
1026 const char *buf, size_t count) 1028 const char *buf, size_t count)
1027{ 1029{
1028 struct asus_laptop *asus = dev_get_drvdata(dev); 1030 struct asus_laptop *asus = dev_get_drvdata(dev);
1029 1031
1030 return sysfs_acpi_set(asus, buf, count, METHOD_WLAN); 1032 return sysfs_acpi_set(asus, buf, count, METHOD_WLAN);
1031} 1033}
1034static DEVICE_ATTR_RW(wlan);
1032 1035
1033/* 1036 /*
1034 * Bluetooth 1037 * Bluetooth
@@ -1042,15 +1045,15 @@ static int asus_bluetooth_set(struct asus_laptop *asus, int status)
1042 return 0; 1045 return 0;
1043} 1046}
1044 1047
1045static ssize_t show_bluetooth(struct device *dev, 1048static ssize_t bluetooth_show(struct device *dev, struct device_attribute *attr,
1046 struct device_attribute *attr, char *buf) 1049 char *buf)
1047{ 1050{
1048 struct asus_laptop *asus = dev_get_drvdata(dev); 1051 struct asus_laptop *asus = dev_get_drvdata(dev);
1049 1052
1050 return sprintf(buf, "%d\n", asus_wireless_status(asus, BT_RSTS)); 1053 return sprintf(buf, "%d\n", asus_wireless_status(asus, BT_RSTS));
1051} 1054}
1052 1055
1053static ssize_t store_bluetooth(struct device *dev, 1056static ssize_t bluetooth_store(struct device *dev,
1054 struct device_attribute *attr, const char *buf, 1057 struct device_attribute *attr, const char *buf,
1055 size_t count) 1058 size_t count)
1056{ 1059{
@@ -1058,6 +1061,7 @@ static ssize_t store_bluetooth(struct device *dev,
1058 1061
1059 return sysfs_acpi_set(asus, buf, count, METHOD_BLUETOOTH); 1062 return sysfs_acpi_set(asus, buf, count, METHOD_BLUETOOTH);
1060} 1063}
1064static DEVICE_ATTR_RW(bluetooth);
1061 1065
1062/* 1066/*
1063 * Wimax 1067 * Wimax
@@ -1071,22 +1075,22 @@ static int asus_wimax_set(struct asus_laptop *asus, int status)
1071 return 0; 1075 return 0;
1072} 1076}
1073 1077
1074static ssize_t show_wimax(struct device *dev, 1078static ssize_t wimax_show(struct device *dev, struct device_attribute *attr,
1075 struct device_attribute *attr, char *buf) 1079 char *buf)
1076{ 1080{
1077 struct asus_laptop *asus = dev_get_drvdata(dev); 1081 struct asus_laptop *asus = dev_get_drvdata(dev);
1078 1082
1079 return sprintf(buf, "%d\n", asus_wireless_status(asus, WM_RSTS)); 1083 return sprintf(buf, "%d\n", asus_wireless_status(asus, WM_RSTS));
1080} 1084}
1081 1085
1082static ssize_t store_wimax(struct device *dev, 1086static ssize_t wimax_store(struct device *dev, struct device_attribute *attr,
1083 struct device_attribute *attr, const char *buf, 1087 const char *buf, size_t count)
1084 size_t count)
1085{ 1088{
1086 struct asus_laptop *asus = dev_get_drvdata(dev); 1089 struct asus_laptop *asus = dev_get_drvdata(dev);
1087 1090
1088 return sysfs_acpi_set(asus, buf, count, METHOD_WIMAX); 1091 return sysfs_acpi_set(asus, buf, count, METHOD_WIMAX);
1089} 1092}
1093static DEVICE_ATTR_RW(wimax);
1090 1094
1091/* 1095/*
1092 * Wwan 1096 * Wwan
@@ -1100,22 +1104,22 @@ static int asus_wwan_set(struct asus_laptop *asus, int status)
1100 return 0; 1104 return 0;
1101} 1105}
1102 1106
1103static ssize_t show_wwan(struct device *dev, 1107static ssize_t wwan_show(struct device *dev, struct device_attribute *attr,
1104 struct device_attribute *attr, char *buf) 1108 char *buf)
1105{ 1109{
1106 struct asus_laptop *asus = dev_get_drvdata(dev); 1110 struct asus_laptop *asus = dev_get_drvdata(dev);
1107 1111
1108 return sprintf(buf, "%d\n", asus_wireless_status(asus, WW_RSTS)); 1112 return sprintf(buf, "%d\n", asus_wireless_status(asus, WW_RSTS));
1109} 1113}
1110 1114
1111static ssize_t store_wwan(struct device *dev, 1115static ssize_t wwan_store(struct device *dev, struct device_attribute *attr,
1112 struct device_attribute *attr, const char *buf, 1116 const char *buf, size_t count)
1113 size_t count)
1114{ 1117{
1115 struct asus_laptop *asus = dev_get_drvdata(dev); 1118 struct asus_laptop *asus = dev_get_drvdata(dev);
1116 1119
1117 return sysfs_acpi_set(asus, buf, count, METHOD_WWAN); 1120 return sysfs_acpi_set(asus, buf, count, METHOD_WWAN);
1118} 1121}
1122static DEVICE_ATTR_RW(wwan);
1119 1123
1120/* 1124/*
1121 * Display 1125 * Display
@@ -1135,8 +1139,8 @@ static void asus_set_display(struct asus_laptop *asus, int value)
1135 * displays hooked up simultaneously, so be warned. See the acpi4asus README 1139 * displays hooked up simultaneously, so be warned. See the acpi4asus README
1136 * for more info. 1140 * for more info.
1137 */ 1141 */
1138static ssize_t store_disp(struct device *dev, struct device_attribute *attr, 1142static ssize_t display_store(struct device *dev, struct device_attribute *attr,
1139 const char *buf, size_t count) 1143 const char *buf, size_t count)
1140{ 1144{
1141 struct asus_laptop *asus = dev_get_drvdata(dev); 1145 struct asus_laptop *asus = dev_get_drvdata(dev);
1142 int rv, value; 1146 int rv, value;
@@ -1146,6 +1150,7 @@ static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
1146 asus_set_display(asus, value); 1150 asus_set_display(asus, value);
1147 return rv; 1151 return rv;
1148} 1152}
1153static DEVICE_ATTR_WO(display);
1149 1154
1150/* 1155/*
1151 * Light Sens 1156 * Light Sens
@@ -1167,16 +1172,17 @@ static void asus_als_switch(struct asus_laptop *asus, int value)
1167 asus->light_switch = value; 1172 asus->light_switch = value;
1168} 1173}
1169 1174
1170static ssize_t show_lssw(struct device *dev, 1175static ssize_t ls_switch_show(struct device *dev, struct device_attribute *attr,
1171 struct device_attribute *attr, char *buf) 1176 char *buf)
1172{ 1177{
1173 struct asus_laptop *asus = dev_get_drvdata(dev); 1178 struct asus_laptop *asus = dev_get_drvdata(dev);
1174 1179
1175 return sprintf(buf, "%d\n", asus->light_switch); 1180 return sprintf(buf, "%d\n", asus->light_switch);
1176} 1181}
1177 1182
1178static ssize_t store_lssw(struct device *dev, struct device_attribute *attr, 1183static ssize_t ls_switch_store(struct device *dev,
1179 const char *buf, size_t count) 1184 struct device_attribute *attr, const char *buf,
1185 size_t count)
1180{ 1186{
1181 struct asus_laptop *asus = dev_get_drvdata(dev); 1187 struct asus_laptop *asus = dev_get_drvdata(dev);
1182 int rv, value; 1188 int rv, value;
@@ -1187,6 +1193,7 @@ static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
1187 1193
1188 return rv; 1194 return rv;
1189} 1195}
1196static DEVICE_ATTR_RW(ls_switch);
1190 1197
1191static void asus_als_level(struct asus_laptop *asus, int value) 1198static void asus_als_level(struct asus_laptop *asus, int value)
1192{ 1199{
@@ -1195,16 +1202,16 @@ static void asus_als_level(struct asus_laptop *asus, int value)
1195 asus->light_level = value; 1202 asus->light_level = value;
1196} 1203}
1197 1204
1198static ssize_t show_lslvl(struct device *dev, 1205static ssize_t ls_level_show(struct device *dev, struct device_attribute *attr,
1199 struct device_attribute *attr, char *buf) 1206 char *buf)
1200{ 1207{
1201 struct asus_laptop *asus = dev_get_drvdata(dev); 1208 struct asus_laptop *asus = dev_get_drvdata(dev);
1202 1209
1203 return sprintf(buf, "%d\n", asus->light_level); 1210 return sprintf(buf, "%d\n", asus->light_level);
1204} 1211}
1205 1212
1206static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr, 1213static ssize_t ls_level_store(struct device *dev, struct device_attribute *attr,
1207 const char *buf, size_t count) 1214 const char *buf, size_t count)
1208{ 1215{
1209 struct asus_laptop *asus = dev_get_drvdata(dev); 1216 struct asus_laptop *asus = dev_get_drvdata(dev);
1210 int rv, value; 1217 int rv, value;
@@ -1218,6 +1225,7 @@ static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
1218 1225
1219 return rv; 1226 return rv;
1220} 1227}
1228static DEVICE_ATTR_RW(ls_level);
1221 1229
1222static int pega_int_read(struct asus_laptop *asus, int arg, int *result) 1230static int pega_int_read(struct asus_laptop *asus, int arg, int *result)
1223{ 1231{
@@ -1234,8 +1242,8 @@ static int pega_int_read(struct asus_laptop *asus, int arg, int *result)
1234 return err; 1242 return err;
1235} 1243}
1236 1244
1237static ssize_t show_lsvalue(struct device *dev, 1245static ssize_t ls_value_show(struct device *dev, struct device_attribute *attr,
1238 struct device_attribute *attr, char *buf) 1246 char *buf)
1239{ 1247{
1240 struct asus_laptop *asus = dev_get_drvdata(dev); 1248 struct asus_laptop *asus = dev_get_drvdata(dev);
1241 int err, hi, lo; 1249 int err, hi, lo;
@@ -1247,6 +1255,7 @@ static ssize_t show_lsvalue(struct device *dev,
1247 return sprintf(buf, "%d\n", 10 * hi + lo); 1255 return sprintf(buf, "%d\n", 10 * hi + lo);
1248 return err; 1256 return err;
1249} 1257}
1258static DEVICE_ATTR_RO(ls_value);
1250 1259
1251/* 1260/*
1252 * GPS 1261 * GPS
@@ -1274,15 +1283,15 @@ static int asus_gps_switch(struct asus_laptop *asus, int status)
1274 return 0; 1283 return 0;
1275} 1284}
1276 1285
1277static ssize_t show_gps(struct device *dev, 1286static ssize_t gps_show(struct device *dev, struct device_attribute *attr,
1278 struct device_attribute *attr, char *buf) 1287 char *buf)
1279{ 1288{
1280 struct asus_laptop *asus = dev_get_drvdata(dev); 1289 struct asus_laptop *asus = dev_get_drvdata(dev);
1281 1290
1282 return sprintf(buf, "%d\n", asus_gps_status(asus)); 1291 return sprintf(buf, "%d\n", asus_gps_status(asus));
1283} 1292}
1284 1293
1285static ssize_t store_gps(struct device *dev, struct device_attribute *attr, 1294static ssize_t gps_store(struct device *dev, struct device_attribute *attr,
1286 const char *buf, size_t count) 1295 const char *buf, size_t count)
1287{ 1296{
1288 struct asus_laptop *asus = dev_get_drvdata(dev); 1297 struct asus_laptop *asus = dev_get_drvdata(dev);
@@ -1298,6 +1307,7 @@ static ssize_t store_gps(struct device *dev, struct device_attribute *attr,
1298 rfkill_set_sw_state(asus->gps.rfkill, !value); 1307 rfkill_set_sw_state(asus->gps.rfkill, !value);
1299 return rv; 1308 return rv;
1300} 1309}
1310static DEVICE_ATTR_RW(gps);
1301 1311
1302/* 1312/*
1303 * rfkill 1313 * rfkill
@@ -1569,19 +1579,6 @@ static void asus_acpi_notify(struct acpi_device *device, u32 event)
1569 asus_input_notify(asus, event); 1579 asus_input_notify(asus, event);
1570} 1580}
1571 1581
1572static DEVICE_ATTR(infos, S_IRUGO, show_infos, NULL);
1573static DEVICE_ATTR(wlan, S_IRUGO | S_IWUSR, show_wlan, store_wlan);
1574static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR,
1575 show_bluetooth, store_bluetooth);
1576static DEVICE_ATTR(wimax, S_IRUGO | S_IWUSR, show_wimax, store_wimax);
1577static DEVICE_ATTR(wwan, S_IRUGO | S_IWUSR, show_wwan, store_wwan);
1578static DEVICE_ATTR(display, S_IWUSR, NULL, store_disp);
1579static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd);
1580static DEVICE_ATTR(ls_value, S_IRUGO, show_lsvalue, NULL);
1581static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl);
1582static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw);
1583static DEVICE_ATTR(gps, S_IRUGO | S_IWUSR, show_gps, store_gps);
1584
1585static struct attribute *asus_attributes[] = { 1582static struct attribute *asus_attributes[] = {
1586 &dev_attr_infos.attr, 1583 &dev_attr_infos.attr,
1587 &dev_attr_wlan.attr, 1584 &dev_attr_wlan.attr,
@@ -1616,7 +1613,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
1616 else 1613 else
1617 goto normal; 1614 goto normal;
1618 1615
1619 return supported; 1616 return supported ? attr->mode : 0;
1620 } 1617 }
1621 1618
1622normal: 1619normal:
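
The asus-laptop conversion above renames every show/store pair to the <attr>_show/<attr>_store convention so the DEVICE_ATTR_RO/RW/WO helpers can declare each attribute next to its handlers, replacing the bulk DEVICE_ATTR() list; the is_visible fix at the end also makes the callback return a mode (attr->mode or 0) rather than a plain boolean. A minimal sketch of the convention with a hypothetical "example" attribute:

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/sysfs.h>

    static int example_value;

    /* DEVICE_ATTR_RW(example) requires exactly these names. */
    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "%d\n", example_value);
    }

    static ssize_t example_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
    {
            int rv = kstrtoint(buf, 0, &example_value);

            return rv ? rv : count;
    }

    /* Expands to dev_attr_example with mode S_IRUGO | S_IWUSR (0644),
     * equivalent to the old DEVICE_ATTR(example, S_IRUGO | S_IWUSR,
     * example_show, example_store).
     */
    static DEVICE_ATTR_RW(example);
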
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 70d355a9ae2c..55cf10bc7817 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -520,7 +520,7 @@ static acpi_status cmpc_get_accel(acpi_handle handle,
520{ 520{
521 union acpi_object param[2]; 521 union acpi_object param[2];
522 struct acpi_object_list input; 522 struct acpi_object_list input;
523 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, 0 }; 523 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
524 unsigned char *locs; 524 unsigned char *locs;
525 acpi_status status; 525 acpi_status status;
526 526
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 7c21c1c44dfa..2a9afa261c61 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -64,6 +64,7 @@
64#include <linux/acpi.h> 64#include <linux/acpi.h>
65#include <linux/dmi.h> 65#include <linux/dmi.h>
66#include <linux/backlight.h> 66#include <linux/backlight.h>
67#include <linux/fb.h>
67#include <linux/input.h> 68#include <linux/input.h>
68#include <linux/kfifo.h> 69#include <linux/kfifo.h>
69#include <linux/platform_device.h> 70#include <linux/platform_device.h>
@@ -398,7 +399,7 @@ static int bl_get_brightness(struct backlight_device *b)
398static int bl_update_status(struct backlight_device *b) 399static int bl_update_status(struct backlight_device *b)
399{ 400{
400 int ret; 401 int ret;
401 if (b->props.power == 4) 402 if (b->props.power == FB_BLANK_POWERDOWN)
402 ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x3); 403 ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x3);
403 else 404 else
404 ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x0); 405 ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x0);
@@ -1139,9 +1140,9 @@ static int __init fujitsu_init(void)
1139 1140
1140 if (!acpi_video_backlight_support()) { 1141 if (!acpi_video_backlight_support()) {
1141 if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3) 1142 if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3)
1142 fujitsu->bl_device->props.power = 4; 1143 fujitsu->bl_device->props.power = FB_BLANK_POWERDOWN;
1143 else 1144 else
1144 fujitsu->bl_device->props.power = 0; 1145 fujitsu->bl_device->props.power = FB_BLANK_UNBLANK;
1145 } 1146 }
1146 1147
1147 pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n"); 1148 pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n");
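
The fujitsu-laptop hunks swap the magic backlight power values for the named FB_BLANK_* constants from the newly included <linux/fb.h>; the numeric values are unchanged. For reference, a tiny sketch of the mapping (constant values as defined in fb.h):

    #include <linux/fb.h>
    #include <linux/types.h>

    /* FB_BLANK_UNBLANK == 0 (display on),
     * FB_BLANK_POWERDOWN == 4 (display off) -- the old literals.
     */
    static bool backlight_is_off(int power)
    {
            return power == FB_BLANK_POWERDOWN;
    }
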
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 66a4d3284aab..001b199a8c33 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * intel_scu_ipc.c: Driver for the Intel SCU IPC mechanism 2 * intel_scu_ipc.c: Driver for the Intel SCU IPC mechanism
3 * 3 *
4 * (C) Copyright 2008-2010 Intel Corporation 4 * (C) Copyright 2008-2010,2015 Intel Corporation
5 * Author: Sreedhara DS (sreedhara.ds@intel.com) 5 * Author: Sreedhara DS (sreedhara.ds@intel.com)
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
@@ -43,10 +43,9 @@
43/* 43/*
44 * IPC register summary 44 * IPC register summary
45 * 45 *
46 * IPC register blocks are memory mapped at fixed address of 0xFF11C000 46 * IPC register blocks are memory mapped at fixed address of PCI BAR 0.
47 * To read or write information to the SCU, driver writes to IPC-1 memory 47 * To read or write information to the SCU, driver writes to IPC-1 memory
48 * mapped registers (base address 0xFF11C000). The following is the IPC 48 * mapped registers. The following is the IPC mechanism
49 * mechanism
50 * 49 *
51 * 1. IA core cDMI interface claims this transaction and converts it to a 50 * 1. IA core cDMI interface claims this transaction and converts it to a
52 * Transaction Layer Packet (TLP) message which is sent across the cDMI. 51 * Transaction Layer Packet (TLP) message which is sent across the cDMI.
@@ -67,36 +66,28 @@
67#define PCI_DEVICE_ID_CLOVERVIEW 0x08ea 66#define PCI_DEVICE_ID_CLOVERVIEW 0x08ea
68#define PCI_DEVICE_ID_TANGIER 0x11a0 67#define PCI_DEVICE_ID_TANGIER 0x11a0
69 68
70/* intel scu ipc driver data*/ 69/* intel scu ipc driver data */
71struct intel_scu_ipc_pdata_t { 70struct intel_scu_ipc_pdata_t {
72 u32 ipc_base;
73 u32 i2c_base; 71 u32 i2c_base;
74 u32 ipc_len;
75 u32 i2c_len; 72 u32 i2c_len;
76 u8 irq_mode; 73 u8 irq_mode;
77}; 74};
78 75
79static struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = { 76static struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = {
80 .ipc_base = 0xff11c000,
81 .i2c_base = 0xff12b000, 77 .i2c_base = 0xff12b000,
82 .ipc_len = 0x100,
83 .i2c_len = 0x10, 78 .i2c_len = 0x10,
84 .irq_mode = 0, 79 .irq_mode = 0,
85}; 80};
86 81
87/* Penwell and Cloverview */ 82/* Penwell and Cloverview */
88static struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = { 83static struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = {
89 .ipc_base = 0xff11c000,
90 .i2c_base = 0xff12b000, 84 .i2c_base = 0xff12b000,
91 .ipc_len = 0x100,
92 .i2c_len = 0x10, 85 .i2c_len = 0x10,
93 .irq_mode = 1, 86 .irq_mode = 1,
94}; 87};
95 88
96static struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = { 89static struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = {
97 .ipc_base = 0xff009000,
98 .i2c_base = 0xff00d000, 90 .i2c_base = 0xff00d000,
99 .ipc_len = 0x100,
100 .i2c_len = 0x10, 91 .i2c_len = 0x10,
101 .irq_mode = 0, 92 .irq_mode = 0,
102}; 93};
@@ -114,8 +105,6 @@ struct intel_scu_ipc_dev {
114 105
115static struct intel_scu_ipc_dev ipcdev; /* Only one for now */ 106static struct intel_scu_ipc_dev ipcdev; /* Only one for now */
116 107
117static int platform; /* Platform type */
118
119/* 108/*
120 * IPC Read Buffer (Read Only): 109 * IPC Read Buffer (Read Only):
121 * 16 byte buffer for receiving data from SCU, if IPC command 110 * 16 byte buffer for receiving data from SCU, if IPC command
@@ -160,7 +149,6 @@ static inline void ipc_data_writel(u32 data, u32 offset) /* Write ipc data */
160 * Format: 149 * Format:
161 * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)| 150 * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
162 */ 151 */
163
164static inline u8 ipc_read_status(void) 152static inline u8 ipc_read_status(void)
165{ 153{
166 return __raw_readl(ipcdev.ipc_base + 0x04); 154 return __raw_readl(ipcdev.ipc_base + 0x04);
@@ -176,23 +164,24 @@ static inline u32 ipc_data_readl(u32 offset) /* Read ipc u32 data */
176 return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset); 164 return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
177} 165}
178 166
179static inline int busy_loop(void) /* Wait till scu status is busy */ 167/* Wait till scu status is busy */
168static inline int busy_loop(void)
180{ 169{
181 u32 status = 0; 170 u32 status = ipc_read_status();
182 u32 loop_count = 0; 171 u32 loop_count = 100000;
183 172
184 status = ipc_read_status(); 173 /* break if scu doesn't reset busy bit after huge retry */
185 while (status & 1) { 174 while ((status & BIT(0)) && --loop_count) {
186 udelay(1); /* scu processing time is a few microseconds */ 175 udelay(1); /* scu processing time is a few microseconds */
187 status = ipc_read_status(); 176 status = ipc_read_status();
188 loop_count++;
189 /* break if scu doesn't reset busy bit after huge retry */
190 if (loop_count > 100000) {
191 dev_err(&ipcdev.pdev->dev, "IPC timed out");
192 return -ETIMEDOUT;
193 }
194 } 177 }
195 if ((status >> 1) & 1) 178
179 if (status & BIT(0)) {
180 dev_err(&ipcdev.pdev->dev, "IPC timed out");
181 return -ETIMEDOUT;
182 }
183
184 if (status & BIT(1))
196 return -EIO; 185 return -EIO;
197 186
198 return 0; 187 return 0;
@@ -210,14 +199,13 @@ static inline int ipc_wait_for_interrupt(void)
210 } 199 }
211 200
212 status = ipc_read_status(); 201 status = ipc_read_status();
213 202 if (status & BIT(1))
214 if ((status >> 1) & 1)
215 return -EIO; 203 return -EIO;
216 204
217 return 0; 205 return 0;
218} 206}
219 207
220int intel_scu_ipc_check_status(void) 208static int intel_scu_ipc_check_status(void)
221{ 209{
222 return ipcdev.irq_mode ? ipc_wait_for_interrupt() : busy_loop(); 210 return ipcdev.irq_mode ? ipc_wait_for_interrupt() : busy_loop();
223} 211}
@@ -248,18 +236,18 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
248 if (id == IPC_CMD_PCNTRL_R) { 236 if (id == IPC_CMD_PCNTRL_R) {
249 for (nc = 0, offset = 0; nc < count; nc++, offset += 4) 237 for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
250 ipc_data_writel(wbuf[nc], offset); 238 ipc_data_writel(wbuf[nc], offset);
251 ipc_command((count*2) << 16 | id << 12 | 0 << 8 | op); 239 ipc_command((count * 2) << 16 | id << 12 | 0 << 8 | op);
252 } else if (id == IPC_CMD_PCNTRL_W) { 240 } else if (id == IPC_CMD_PCNTRL_W) {
253 for (nc = 0; nc < count; nc++, offset += 1) 241 for (nc = 0; nc < count; nc++, offset += 1)
254 cbuf[offset] = data[nc]; 242 cbuf[offset] = data[nc];
255 for (nc = 0, offset = 0; nc < count; nc++, offset += 4) 243 for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
256 ipc_data_writel(wbuf[nc], offset); 244 ipc_data_writel(wbuf[nc], offset);
257 ipc_command((count*3) << 16 | id << 12 | 0 << 8 | op); 245 ipc_command((count * 3) << 16 | id << 12 | 0 << 8 | op);
258 } else if (id == IPC_CMD_PCNTRL_M) { 246 } else if (id == IPC_CMD_PCNTRL_M) {
259 cbuf[offset] = data[0]; 247 cbuf[offset] = data[0];
260 cbuf[offset + 1] = data[1]; 248 cbuf[offset + 1] = data[1];
261 ipc_data_writel(wbuf[0], 0); /* Write wbuff */ 249 ipc_data_writel(wbuf[0], 0); /* Write wbuff */
262 ipc_command(4 << 16 | id << 12 | 0 << 8 | op); 250 ipc_command(4 << 16 | id << 12 | 0 << 8 | op);
263 } 251 }
264 252
265 err = intel_scu_ipc_check_status(); 253 err = intel_scu_ipc_check_status();
@@ -301,7 +289,7 @@ EXPORT_SYMBOL(intel_scu_ipc_ioread8);
301 */ 289 */
302int intel_scu_ipc_ioread16(u16 addr, u16 *data) 290int intel_scu_ipc_ioread16(u16 addr, u16 *data)
303{ 291{
304 u16 x[2] = {addr, addr + 1 }; 292 u16 x[2] = {addr, addr + 1};
305 return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R); 293 return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
306} 294}
307EXPORT_SYMBOL(intel_scu_ipc_ioread16); 295EXPORT_SYMBOL(intel_scu_ipc_ioread16);
@@ -351,7 +339,7 @@ EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
351 */ 339 */
352int intel_scu_ipc_iowrite16(u16 addr, u16 data) 340int intel_scu_ipc_iowrite16(u16 addr, u16 data)
353{ 341{
354 u16 x[2] = {addr, addr + 1 }; 342 u16 x[2] = {addr, addr + 1};
355 return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W); 343 return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
356} 344}
357EXPORT_SYMBOL(intel_scu_ipc_iowrite16); 345EXPORT_SYMBOL(intel_scu_ipc_iowrite16);
@@ -412,7 +400,6 @@ int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
412} 400}
413EXPORT_SYMBOL(intel_scu_ipc_writev); 401EXPORT_SYMBOL(intel_scu_ipc_writev);
414 402
415
416/** 403/**
417 * intel_scu_ipc_update_register - r/m/w a register 404 * intel_scu_ipc_update_register - r/m/w a register
418 * @addr: register address 405 * @addr: register address
@@ -475,9 +462,8 @@ EXPORT_SYMBOL(intel_scu_ipc_simple_command);
475 * Issue a command to the SCU which involves data transfers. Do the 462 * Issue a command to the SCU which involves data transfers. Do the
476 * data copies under the lock but leave it for the caller to interpret 463 * data copies under the lock but leave it for the caller to interpret
477 */ 464 */
478
479int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen, 465int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
480 u32 *out, int outlen) 466 u32 *out, int outlen)
481{ 467{
482 int i, err; 468 int i, err;
483 469
@@ -503,7 +489,7 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
503} 489}
504EXPORT_SYMBOL(intel_scu_ipc_command); 490EXPORT_SYMBOL(intel_scu_ipc_command);
505 491
506/*I2C commands */ 492/* I2C commands */
507#define IPC_I2C_WRITE 1 /* I2C Write command */ 493#define IPC_I2C_WRITE 1 /* I2C Write command */
508#define IPC_I2C_READ 2 /* I2C Read command */ 494#define IPC_I2C_READ 2 /* I2C Read command */
509 495
@@ -577,7 +563,7 @@ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
577{ 563{
578 int err; 564 int err;
579 struct intel_scu_ipc_pdata_t *pdata; 565 struct intel_scu_ipc_pdata_t *pdata;
580 resource_size_t pci_resource; 566 resource_size_t base;
581 567
582 if (ipcdev.pdev) /* We support only one SCU */ 568 if (ipcdev.pdev) /* We support only one SCU */
583 return -EBUSY; 569 return -EBUSY;
@@ -595,8 +581,8 @@ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
595 if (err) 581 if (err)
596 return err; 582 return err;
597 583
598 pci_resource = pci_resource_start(dev, 0); 584 base = pci_resource_start(dev, 0);
599 if (!pci_resource) 585 if (!base)
600 return -ENOMEM; 586 return -ENOMEM;
601 587
602 init_completion(&ipcdev.cmd_complete); 588 init_completion(&ipcdev.cmd_complete);
@@ -604,7 +590,7 @@ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
604 if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev)) 590 if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev))
605 return -EBUSY; 591 return -EBUSY;
606 592
607 ipcdev.ipc_base = ioremap_nocache(pdata->ipc_base, pdata->ipc_len); 593 ipcdev.ipc_base = ioremap_nocache(base, pci_resource_len(dev, 0));
608 if (!ipcdev.ipc_base) 594 if (!ipcdev.ipc_base)
609 return -ENOMEM; 595 return -ENOMEM;
610 596
@@ -666,9 +652,10 @@ static struct pci_driver ipc_driver = {
666 .remove = ipc_remove, 652 .remove = ipc_remove,
667}; 653};
668 654
669
670static int __init intel_scu_ipc_init(void) 655static int __init intel_scu_ipc_init(void)
671{ 656{
657 int platform; /* Platform type */
658
672 platform = intel_mid_identify_cpu(); 659 platform = intel_mid_identify_cpu();
673 if (platform == 0) 660 if (platform == 0)
674 return -ENODEV; 661 return -ENODEV;
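
The busy_loop() rewrite above reads the status register once up front, polls with a decrementing retry budget instead of an incrementing counter checked inside the loop, and finally distinguishes a timeout (busy bit still set) from a device-reported error (error bit set). The same shape in isolation, assuming a hypothetical status register with bit 0 = busy and bit 1 = error, as in the driver:

    #include <linux/bitops.h>
    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/types.h>

    static int poll_until_idle(void __iomem *status_reg)
    {
            u32 status = readl(status_reg);
            u32 retries = 100000;

            /* Bail out if the device never clears its busy bit. */
            while ((status & BIT(0)) && --retries) {
                    udelay(1);
                    status = readl(status_reg);
            }

            if (status & BIT(0))    /* retry budget exhausted */
                    return -ETIMEDOUT;
            if (status & BIT(1))    /* device flagged an error */
                    return -EIO;

            return 0;
    }
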
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index ff765d8e1a09..9e701b2256f9 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -124,6 +124,10 @@ struct sabi_commands {
124 u16 get_wireless_status; 124 u16 get_wireless_status;
125 u16 set_wireless_status; 125 u16 set_wireless_status;
126 126
127 /* 0x80 is off, 0x81 is on */
128 u16 get_lid_handling;
129 u16 set_lid_handling;
130
127 /* 0x81 to read, (0x82 | level << 8) to set, 0xaabb to enable */ 131 /* 0x81 to read, (0x82 | level << 8) to set, 0xaabb to enable */
128 u16 kbd_backlight; 132 u16 kbd_backlight;
129 133
@@ -194,6 +198,9 @@ static const struct sabi_config sabi_configs[] = {
194 .get_wireless_status = 0xFFFF, 198 .get_wireless_status = 0xFFFF,
195 .set_wireless_status = 0xFFFF, 199 .set_wireless_status = 0xFFFF,
196 200
201 .get_lid_handling = 0xFFFF,
202 .set_lid_handling = 0xFFFF,
203
197 .kbd_backlight = 0xFFFF, 204 .kbd_backlight = 0xFFFF,
198 205
199 .set_linux = 0x0a, 206 .set_linux = 0x0a,
@@ -254,6 +261,9 @@ static const struct sabi_config sabi_configs[] = {
254 .get_wireless_status = 0x69, 261 .get_wireless_status = 0x69,
255 .set_wireless_status = 0x6a, 262 .set_wireless_status = 0x6a,
256 263
264 .get_lid_handling = 0x6d,
265 .set_lid_handling = 0x6e,
266
257 .kbd_backlight = 0x78, 267 .kbd_backlight = 0x78,
258 268
259 .set_linux = 0xff, 269 .set_linux = 0xff,
@@ -353,6 +363,8 @@ struct samsung_quirks {
353 bool broken_acpi_video; 363 bool broken_acpi_video;
354 bool four_kbd_backlight_levels; 364 bool four_kbd_backlight_levels;
355 bool enable_kbd_backlight; 365 bool enable_kbd_backlight;
366 bool use_native_backlight;
367 bool lid_handling;
356}; 368};
357 369
358static struct samsung_quirks samsung_unknown = {}; 370static struct samsung_quirks samsung_unknown = {};
@@ -361,11 +373,19 @@ static struct samsung_quirks samsung_broken_acpi_video = {
361 .broken_acpi_video = true, 373 .broken_acpi_video = true,
362}; 374};
363 375
376static struct samsung_quirks samsung_use_native_backlight = {
377 .use_native_backlight = true,
378};
379
364static struct samsung_quirks samsung_np740u3e = { 380static struct samsung_quirks samsung_np740u3e = {
365 .four_kbd_backlight_levels = true, 381 .four_kbd_backlight_levels = true,
366 .enable_kbd_backlight = true, 382 .enable_kbd_backlight = true,
367}; 383};
368 384
385static struct samsung_quirks samsung_lid_handling = {
386 .lid_handling = true,
387};
388
369static bool force; 389static bool force;
370module_param(force, bool, 0); 390module_param(force, bool, 0);
371MODULE_PARM_DESC(force, 391MODULE_PARM_DESC(force,
@@ -748,7 +768,7 @@ static ssize_t set_battery_life_extender(struct device *dev,
748 struct samsung_laptop *samsung = dev_get_drvdata(dev); 768 struct samsung_laptop *samsung = dev_get_drvdata(dev);
749 int ret, value; 769 int ret, value;
750 770
751 if (!count || sscanf(buf, "%i", &value) != 1) 771 if (!count || kstrtoint(buf, 0, &value) != 0)
752 return -EINVAL; 772 return -EINVAL;
753 773
754 ret = write_battery_life_extender(samsung, !!value); 774 ret = write_battery_life_extender(samsung, !!value);
@@ -817,7 +837,7 @@ static ssize_t set_usb_charge(struct device *dev,
817 struct samsung_laptop *samsung = dev_get_drvdata(dev); 837 struct samsung_laptop *samsung = dev_get_drvdata(dev);
818 int ret, value; 838 int ret, value;
819 839
820 if (!count || sscanf(buf, "%i", &value) != 1) 840 if (!count || kstrtoint(buf, 0, &value) != 0)
821 return -EINVAL; 841 return -EINVAL;
822 842
823 ret = write_usb_charge(samsung, !!value); 843 ret = write_usb_charge(samsung, !!value);
@@ -830,10 +850,76 @@ static ssize_t set_usb_charge(struct device *dev,
830static DEVICE_ATTR(usb_charge, S_IWUSR | S_IRUGO, 850static DEVICE_ATTR(usb_charge, S_IWUSR | S_IRUGO,
831 get_usb_charge, set_usb_charge); 851 get_usb_charge, set_usb_charge);
832 852
853static int read_lid_handling(struct samsung_laptop *samsung)
854{
855 const struct sabi_commands *commands = &samsung->config->commands;
856 struct sabi_data data;
857 int retval;
858
859 if (commands->get_lid_handling == 0xFFFF)
860 return -ENODEV;
861
862 memset(&data, 0, sizeof(data));
863 retval = sabi_command(samsung, commands->get_lid_handling,
864 &data, &data);
865
866 if (retval)
867 return retval;
868
869 return data.data[0] & 0x1;
870}
871
872static int write_lid_handling(struct samsung_laptop *samsung,
873 int enabled)
874{
875 const struct sabi_commands *commands = &samsung->config->commands;
876 struct sabi_data data;
877
878 memset(&data, 0, sizeof(data));
879 data.data[0] = 0x80 | enabled;
880 return sabi_command(samsung, commands->set_lid_handling,
881 &data, NULL);
882}
883
884static ssize_t get_lid_handling(struct device *dev,
885 struct device_attribute *attr,
886 char *buf)
887{
888 struct samsung_laptop *samsung = dev_get_drvdata(dev);
889 int ret;
890
891 ret = read_lid_handling(samsung);
892 if (ret < 0)
893 return ret;
894
895 return sprintf(buf, "%d\n", ret);
896}
897
898static ssize_t set_lid_handling(struct device *dev,
899 struct device_attribute *attr,
900 const char *buf, size_t count)
901{
902 struct samsung_laptop *samsung = dev_get_drvdata(dev);
903 int ret, value;
904
905 if (!count || kstrtoint(buf, 0, &value) != 0)
906 return -EINVAL;
907
908 ret = write_lid_handling(samsung, !!value);
909 if (ret < 0)
910 return ret;
911
912 return count;
913}
914
915static DEVICE_ATTR(lid_handling, S_IWUSR | S_IRUGO,
916 get_lid_handling, set_lid_handling);
917
833static struct attribute *platform_attributes[] = { 918static struct attribute *platform_attributes[] = {
834 &dev_attr_performance_level.attr, 919 &dev_attr_performance_level.attr,
835 &dev_attr_battery_life_extender.attr, 920 &dev_attr_battery_life_extender.attr,
836 &dev_attr_usb_charge.attr, 921 &dev_attr_usb_charge.attr,
922 &dev_attr_lid_handling.attr,
837 NULL 923 NULL
838}; 924};
839 925
@@ -956,6 +1042,22 @@ static int __init samsung_rfkill_init(struct samsung_laptop *samsung)
956 return 0; 1042 return 0;
957} 1043}
958 1044
1045static void samsung_lid_handling_exit(struct samsung_laptop *samsung)
1046{
1047 if (samsung->quirks->lid_handling)
1048 write_lid_handling(samsung, 0);
1049}
1050
1051static int __init samsung_lid_handling_init(struct samsung_laptop *samsung)
1052{
1053 int retval = 0;
1054
1055 if (samsung->quirks->lid_handling)
1056 retval = write_lid_handling(samsung, 1);
1057
1058 return retval;
1059}
1060
959static int kbd_backlight_enable(struct samsung_laptop *samsung) 1061static int kbd_backlight_enable(struct samsung_laptop *samsung)
960{ 1062{
961 const struct sabi_commands *commands = &samsung->config->commands; 1063 const struct sabi_commands *commands = &samsung->config->commands;
@@ -1111,7 +1213,7 @@ static int __init samsung_backlight_init(struct samsung_laptop *samsung)
1111} 1213}
1112 1214
1113static umode_t samsung_sysfs_is_visible(struct kobject *kobj, 1215static umode_t samsung_sysfs_is_visible(struct kobject *kobj,
1114 struct attribute *attr, int idx) 1216 struct attribute *attr, int idx)
1115{ 1217{
1116 struct device *dev = container_of(kobj, struct device, kobj); 1218 struct device *dev = container_of(kobj, struct device, kobj);
1117 struct platform_device *pdev = to_platform_device(dev); 1219 struct platform_device *pdev = to_platform_device(dev);
@@ -1124,6 +1226,8 @@ static umode_t samsung_sysfs_is_visible(struct kobject *kobj,
1124 ok = !!(read_battery_life_extender(samsung) >= 0); 1226 ok = !!(read_battery_life_extender(samsung) >= 0);
1125 if (attr == &dev_attr_usb_charge.attr) 1227 if (attr == &dev_attr_usb_charge.attr)
1126 ok = !!(read_usb_charge(samsung) >= 0); 1228 ok = !!(read_usb_charge(samsung) >= 0);
1229 if (attr == &dev_attr_lid_handling.attr)
1230 ok = !!(read_lid_handling(samsung) >= 0);
1127 1231
1128 return ok ? attr->mode : 0; 1232 return ok ? attr->mode : 0;
1129} 1233}
@@ -1357,7 +1461,7 @@ static int __init samsung_sabi_init(struct samsung_laptop *samsung)
1357 samsung_sabi_diag(samsung); 1461 samsung_sabi_diag(samsung);
1358 1462
1359 /* Try to find one of the signatures in memory to find the header */ 1463 /* Try to find one of the signatures in memory to find the header */
1360 for (i = 0; sabi_configs[i].test_string != 0; ++i) { 1464 for (i = 0; sabi_configs[i].test_string != NULL; ++i) {
1361 samsung->config = &sabi_configs[i]; 1465 samsung->config = &sabi_configs[i];
1362 loca = find_signature(samsung->f0000_segment, 1466 loca = find_signature(samsung->f0000_segment,
1363 samsung->config->test_string); 1467 samsung->config->test_string);
@@ -1436,6 +1540,9 @@ static int samsung_pm_notification(struct notifier_block *nb,
1436 samsung->quirks->enable_kbd_backlight) 1540 samsung->quirks->enable_kbd_backlight)
1437 kbd_backlight_enable(samsung); 1541 kbd_backlight_enable(samsung);
1438 1542
1543 if (val == PM_POST_HIBERNATION && samsung->quirks->lid_handling)
1544 write_lid_handling(samsung, 1);
1545
1439 return 0; 1546 return 0;
1440} 1547}
1441 1548
@@ -1507,7 +1614,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
1507 DMI_MATCH(DMI_PRODUCT_NAME, "N150P"), 1614 DMI_MATCH(DMI_PRODUCT_NAME, "N150P"),
1508 DMI_MATCH(DMI_BOARD_NAME, "N150P"), 1615 DMI_MATCH(DMI_BOARD_NAME, "N150P"),
1509 }, 1616 },
1510 .driver_data = &samsung_broken_acpi_video, 1617 .driver_data = &samsung_use_native_backlight,
1511 }, 1618 },
1512 { 1619 {
1513 .callback = samsung_dmi_matched, 1620 .callback = samsung_dmi_matched,
@@ -1517,7 +1624,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
1517 DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"), 1624 DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
1518 DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"), 1625 DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
1519 }, 1626 },
1520 .driver_data = &samsung_broken_acpi_video, 1627 .driver_data = &samsung_use_native_backlight,
1521 }, 1628 },
1522 { 1629 {
1523 .callback = samsung_dmi_matched, 1630 .callback = samsung_dmi_matched,
@@ -1557,7 +1664,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
1557 DMI_MATCH(DMI_PRODUCT_NAME, "N250P"), 1664 DMI_MATCH(DMI_PRODUCT_NAME, "N250P"),
1558 DMI_MATCH(DMI_BOARD_NAME, "N250P"), 1665 DMI_MATCH(DMI_BOARD_NAME, "N250P"),
1559 }, 1666 },
1560 .driver_data = &samsung_broken_acpi_video, 1667 .driver_data = &samsung_use_native_backlight,
1561 }, 1668 },
1562 { 1669 {
1563 .callback = samsung_dmi_matched, 1670 .callback = samsung_dmi_matched,
@@ -1578,6 +1685,15 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
1578 }, 1685 },
1579 .driver_data = &samsung_np740u3e, 1686 .driver_data = &samsung_np740u3e,
1580 }, 1687 },
1688 {
1689 .callback = samsung_dmi_matched,
1690 .ident = "300V3Z/300V4Z/300V5Z",
1691 .matches = {
1692 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
1693 DMI_MATCH(DMI_PRODUCT_NAME, "300V3Z/300V4Z/300V5Z"),
1694 },
1695 .driver_data = &samsung_lid_handling,
1696 },
1581 { }, 1697 { },
1582}; 1698};
1583MODULE_DEVICE_TABLE(dmi, samsung_dmi_table); 1699MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
@@ -1616,6 +1732,15 @@ static int __init samsung_init(void)
1616 pr_info("Disabling ACPI video driver\n"); 1732 pr_info("Disabling ACPI video driver\n");
1617 acpi_video_unregister(); 1733 acpi_video_unregister();
1618 } 1734 }
1735
1736 if (samsung->quirks->use_native_backlight) {
1737 pr_info("Using native backlight driver\n");
1738 /* Tell acpi-video to not handle the backlight */
1739 acpi_video_dmi_promote_vendor();
1740 acpi_video_unregister();
1741 /* And also do not handle it ourselves */
1742 samsung->handle_backlight = false;
1743 }
1619#endif 1744#endif
1620 1745
1621 ret = samsung_platform_init(samsung); 1746 ret = samsung_platform_init(samsung);
@@ -1648,6 +1773,10 @@ static int __init samsung_init(void)
1648 if (ret) 1773 if (ret)
1649 goto error_leds; 1774 goto error_leds;
1650 1775
1776 ret = samsung_lid_handling_init(samsung);
1777 if (ret)
1778 goto error_lid_handling;
1779
1651 ret = samsung_debugfs_init(samsung); 1780 ret = samsung_debugfs_init(samsung);
1652 if (ret) 1781 if (ret)
1653 goto error_debugfs; 1782 goto error_debugfs;
@@ -1659,6 +1788,8 @@ static int __init samsung_init(void)
1659 return ret; 1788 return ret;
1660 1789
1661error_debugfs: 1790error_debugfs:
1791 samsung_lid_handling_exit(samsung);
1792error_lid_handling:
1662 samsung_leds_exit(samsung); 1793 samsung_leds_exit(samsung);
1663error_leds: 1794error_leds:
1664 samsung_rfkill_exit(samsung); 1795 samsung_rfkill_exit(samsung);
@@ -1683,6 +1814,7 @@ static void __exit samsung_exit(void)
1683 unregister_pm_notifier(&samsung->pm_nb); 1814 unregister_pm_notifier(&samsung->pm_nb);
1684 1815
1685 samsung_debugfs_exit(samsung); 1816 samsung_debugfs_exit(samsung);
1817 samsung_lid_handling_exit(samsung);
1686 samsung_leds_exit(samsung); 1818 samsung_leds_exit(samsung);
1687 samsung_rfkill_exit(samsung); 1819 samsung_rfkill_exit(samsung);
1688 samsung_backlight_exit(samsung); 1820 samsung_backlight_exit(samsung);
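
Among the samsung-laptop changes, the sysfs store handlers move from sscanf(buf, "%i", &value) to kstrtoint(buf, 0, &value). The difference matters for input validation: sscanf() stops at the first non-numeric character and still reports one successful conversion (so "1junk" parses as 1) and does not flag overflow, whereas kstrtoint() accepts only a complete integer (one trailing newline is tolerated) and returns -EINVAL or -ERANGE otherwise. A minimal sketch of the stricter parse, with a hypothetical helper name:

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    static int parse_enable_flag(const char *buf, bool *enabled)
    {
            int value;

            /* Rejects "1junk" (-EINVAL) and out-of-range input
             * (-ERANGE); base 0 honours 0x/0 prefixes like "%i" did.
             */
            if (kstrtoint(buf, 0, &value))
                    return -EINVAL;

            *enabled = !!value;
            return 0;
    }
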
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 6dd1c0e7dcd9..e51c1e753607 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1032,7 +1032,7 @@ struct sony_backlight_props {
1032 u8 offset; 1032 u8 offset;
1033 u8 maxlvl; 1033 u8 maxlvl;
1034}; 1034};
1035struct sony_backlight_props sony_bl_props; 1035static struct sony_backlight_props sony_bl_props;
1036 1036
1037static int sony_backlight_update_status(struct backlight_device *bd) 1037static int sony_backlight_update_status(struct backlight_device *bd)
1038{ 1038{
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index c3d11fabc46f..3b8ceee7c5cb 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -196,6 +196,7 @@ enum tpacpi_hkey_event_t {
196 /* Key-related user-interface events */ 196 /* Key-related user-interface events */
197 TP_HKEY_EV_KEY_NUMLOCK = 0x6000, /* NumLock key pressed */ 197 TP_HKEY_EV_KEY_NUMLOCK = 0x6000, /* NumLock key pressed */
198 TP_HKEY_EV_KEY_FN = 0x6005, /* Fn key pressed? E420 */ 198 TP_HKEY_EV_KEY_FN = 0x6005, /* Fn key pressed? E420 */
199 TP_HKEY_EV_KEY_FN_ESC = 0x6060, /* Fn+Esc key pressed X240 */
199 200
200 /* Thermal events */ 201 /* Thermal events */
201 TP_HKEY_EV_ALARM_BAT_HOT = 0x6011, /* battery too hot */ 202 TP_HKEY_EV_ALARM_BAT_HOT = 0x6011, /* battery too hot */
@@ -3456,7 +3457,7 @@ enum ADAPTIVE_KEY_MODE {
3456 LAYFLAT_MODE 3457 LAYFLAT_MODE
3457}; 3458};
3458 3459
3459const int adaptive_keyboard_modes[] = { 3460static const int adaptive_keyboard_modes[] = {
3460 HOME_MODE, 3461 HOME_MODE,
3461/* WEB_BROWSER_MODE = 2, 3462/* WEB_BROWSER_MODE = 2,
3462 WEB_CONFERENCE_MODE = 3, */ 3463 WEB_CONFERENCE_MODE = 3, */
@@ -3712,6 +3713,7 @@ static bool hotkey_notify_6xxx(const u32 hkey,
3712 3713
3713 case TP_HKEY_EV_KEY_NUMLOCK: 3714 case TP_HKEY_EV_KEY_NUMLOCK:
3714 case TP_HKEY_EV_KEY_FN: 3715 case TP_HKEY_EV_KEY_FN:
3716 case TP_HKEY_EV_KEY_FN_ESC:
3715 /* key press events, we just ignore them as long as the EC 3717 /* key press events, we just ignore them as long as the EC
3716 * is still reporting them in the normal keyboard stream */ 3718 * is still reporting them in the normal keyboard stream */
3717 *send_acpi_ev = false; 3719 *send_acpi_ev = false;
@@ -8883,17 +8885,31 @@ static bool __pure __init tpacpi_is_fw_digit(const char c)
8883 return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z'); 8885 return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z');
8884} 8886}
8885 8887
8886/* Most models: xxyTkkWW (#.##c); Ancient 570/600 and -SL lack (#.##c) */
8887static bool __pure __init tpacpi_is_valid_fw_id(const char * const s, 8888static bool __pure __init tpacpi_is_valid_fw_id(const char * const s,
8888 const char t) 8889 const char t)
8889{ 8890{
8890 return s && strlen(s) >= 8 && 8891 /*
8892 * Most models: xxyTkkWW (#.##c)
8893 * Ancient 570/600 and -SL lack (#.##c)
8894 */
8895 if (s && strlen(s) >= 8 &&
8891 tpacpi_is_fw_digit(s[0]) && 8896 tpacpi_is_fw_digit(s[0]) &&
8892 tpacpi_is_fw_digit(s[1]) && 8897 tpacpi_is_fw_digit(s[1]) &&
8893 s[2] == t && 8898 s[2] == t &&
8894 (s[3] == 'T' || s[3] == 'N') && 8899 (s[3] == 'T' || s[3] == 'N') &&
8895 tpacpi_is_fw_digit(s[4]) && 8900 tpacpi_is_fw_digit(s[4]) &&
8896 tpacpi_is_fw_digit(s[5]); 8901 tpacpi_is_fw_digit(s[5]))
8902 return true;
8903
8904 /* New models: xxxyTkkW (#.##c); T550 and some others */
8905 return s && strlen(s) >= 8 &&
8906 tpacpi_is_fw_digit(s[0]) &&
8907 tpacpi_is_fw_digit(s[1]) &&
8908 tpacpi_is_fw_digit(s[2]) &&
8909 s[3] == t &&
8910 (s[4] == 'T' || s[4] == 'N') &&
8911 tpacpi_is_fw_digit(s[5]) &&
8912 tpacpi_is_fw_digit(s[6]);
8897} 8913}
8898 8914
8899/* returns 0 - probe ok, or < 0 - probe error. 8915/* returns 0 - probe ok, or < 0 - probe error.
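
tpacpi_is_valid_fw_id() above now accepts a second layout, xxxyTkkW, alongside the classic xxyTkkWW, by duplicating the character-class checks. Purely as an illustrative alternative (not the driver's code), the two layouts can also be expressed as small pattern strings, assuming the type byte t passed by the caller is never one of the metacharacters 'd' or 'T':

    #include <linux/string.h>
    #include <linux/types.h>

    static bool fw_digit(char c)
    {
            return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z');
    }

    /* 'd' matches a firmware digit, 'T' matches 'T' or 'N', anything
     * else matches itself. Simplified: the driver additionally insists
     * on strlen(s) >= 8 to leave room for the version suffix.
     */
    static bool fw_id_matches(const char *s, const char *pat)
    {
            if (!s)
                    return false;
            for (; *pat; pat++, s++) {
                    if (!*s)
                            return false;
                    if (*pat == 'd') {
                            if (!fw_digit(*s))
                                    return false;
                    } else if (*pat == 'T') {
                            if (*s != 'T' && *s != 'N')
                                    return false;
                    } else if (*s != *pat) {
                            return false;
                    }
            }
            return true;
    }

    static bool is_valid_fw_id(const char *s, char t)
    {
            const char pat_old[] = { 'd', 'd', t, 'T', 'd', 'd', '\0' };
            const char pat_new[] = { 'd', 'd', 'd', t, 'T', 'd', 'd', '\0' };

            return fw_id_matches(s, pat_old) || fw_id_matches(s, pat_new);
    }
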
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index fc34a71866ed..dbcb7a8915b8 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1,11 +1,10 @@
1/* 1/*
2 * toshiba_acpi.c - Toshiba Laptop ACPI Extras 2 * toshiba_acpi.c - Toshiba Laptop ACPI Extras
3 * 3 *
4 *
5 * Copyright (C) 2002-2004 John Belmonte 4 * Copyright (C) 2002-2004 John Belmonte
6 * Copyright (C) 2008 Philip Langdale 5 * Copyright (C) 2008 Philip Langdale
7 * Copyright (C) 2010 Pierre Ducroquet 6 * Copyright (C) 2010 Pierre Ducroquet
8 * Copyright (C) 2014 Azael Avalos 7 * Copyright (C) 2014-2015 Azael Avalos
9 * 8 *
10 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -17,10 +16,8 @@
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 17 * GNU General Public License for more details.
19 * 18 *
20 * You should have received a copy of the GNU General Public License 19 * The full GNU General Public License is included in this distribution in
21 * along with this program; if not, write to the Free Software 20 * the file called "COPYING".
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * 21 *
25 * The development page for this driver is located at 22 * The development page for this driver is located at
26 * http://memebeam.org/toys/ToshibaAcpiDriver. 23 * http://memebeam.org/toys/ToshibaAcpiDriver.
@@ -30,15 +27,11 @@
30 * engineering the Windows drivers 27 * engineering the Windows drivers
31 * Yasushi Nagato - changes for linux kernel 2.4 -> 2.5 28 * Yasushi Nagato - changes for linux kernel 2.4 -> 2.5
32 * Rob Miller - TV out and hotkeys help 29 * Rob Miller - TV out and hotkeys help
33 *
34 *
35 * TODO
36 *
37 */ 30 */
38 31
39#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
40 33
41#define TOSHIBA_ACPI_VERSION "0.20" 34#define TOSHIBA_ACPI_VERSION "0.21"
42#define PROC_INTERFACE_VERSION 1 35#define PROC_INTERFACE_VERSION 1
43 36
44#include <linux/kernel.h> 37#include <linux/kernel.h>
@@ -57,7 +50,7 @@
57#include <linux/i8042.h> 50#include <linux/i8042.h>
58#include <linux/acpi.h> 51#include <linux/acpi.h>
59#include <linux/dmi.h> 52#include <linux/dmi.h>
60#include <asm/uaccess.h> 53#include <linux/uaccess.h>
61 54
62MODULE_AUTHOR("John Belmonte"); 55MODULE_AUTHOR("John Belmonte");
63MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver"); 56MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver");
@@ -71,7 +64,8 @@ MODULE_LICENSE("GPL");
71/* Toshiba ACPI method paths */ 64/* Toshiba ACPI method paths */
72#define METHOD_VIDEO_OUT "\\_SB_.VALX.DSSX" 65#define METHOD_VIDEO_OUT "\\_SB_.VALX.DSSX"
73 66
74/* The Toshiba configuration interface is composed of the HCI and the SCI, 67/*
68 * The Toshiba configuration interface is composed of the HCI and the SCI,
75 * which are defined as follows: 69 * which are defined as follows:
76 * 70 *
77 * HCI is Toshiba's "Hardware Control Interface" which is supposed to 71 * HCI is Toshiba's "Hardware Control Interface" which is supposed to
@@ -108,6 +102,7 @@ MODULE_LICENSE("GPL");
108#define TOS_FIFO_EMPTY 0x8c00 102#define TOS_FIFO_EMPTY 0x8c00
109#define TOS_DATA_NOT_AVAILABLE 0x8d20 103#define TOS_DATA_NOT_AVAILABLE 0x8d20
110#define TOS_NOT_INITIALIZED 0x8d50 104#define TOS_NOT_INITIALIZED 0x8d50
105#define TOS_NOT_INSTALLED 0x8e00
111 106
112/* registers */ 107/* registers */
113#define HCI_FAN 0x0004 108#define HCI_FAN 0x0004
@@ -121,9 +116,14 @@ MODULE_LICENSE("GPL");
121#define HCI_KBD_ILLUMINATION 0x0095 116#define HCI_KBD_ILLUMINATION 0x0095
122#define HCI_ECO_MODE 0x0097 117#define HCI_ECO_MODE 0x0097
123#define HCI_ACCELEROMETER2 0x00a6 118#define HCI_ACCELEROMETER2 0x00a6
119#define SCI_PANEL_POWER_ON 0x010d
124#define SCI_ILLUMINATION 0x014e 120#define SCI_ILLUMINATION 0x014e
121#define SCI_USB_SLEEP_CHARGE 0x0150
125#define SCI_KBD_ILLUM_STATUS 0x015c 122#define SCI_KBD_ILLUM_STATUS 0x015c
123#define SCI_USB_SLEEP_MUSIC 0x015e
124#define SCI_USB_THREE 0x0169
126#define SCI_TOUCHPAD 0x050e 125#define SCI_TOUCHPAD 0x050e
126#define SCI_KBD_FUNCTION_KEYS 0x0522
127 127
128/* field definitions */ 128/* field definitions */
129#define HCI_ACCEL_MASK 0x7fff 129#define HCI_ACCEL_MASK 0x7fff
@@ -146,6 +146,15 @@ MODULE_LICENSE("GPL");
146#define SCI_KBD_MODE_ON 0x8 146#define SCI_KBD_MODE_ON 0x8
147#define SCI_KBD_MODE_OFF 0x10 147#define SCI_KBD_MODE_OFF 0x10
148#define SCI_KBD_TIME_MAX 0x3c001a 148#define SCI_KBD_TIME_MAX 0x3c001a
149#define SCI_USB_CHARGE_MODE_MASK 0xff
150#define SCI_USB_CHARGE_DISABLED 0x30000
151#define SCI_USB_CHARGE_ALTERNATE 0x30009
152#define SCI_USB_CHARGE_AUTO 0x30021
153#define SCI_USB_CHARGE_BAT_MASK 0x7
154#define SCI_USB_CHARGE_BAT_LVL_OFF 0x1
155#define SCI_USB_CHARGE_BAT_LVL_ON 0x4
156#define SCI_USB_CHARGE_BAT_LVL 0x0200
157#define SCI_USB_CHARGE_RAPID_DSP 0x0300
149 158
150struct toshiba_acpi_dev { 159struct toshiba_acpi_dev {
151 struct acpi_device *acpi_dev; 160 struct acpi_device *acpi_dev;
@@ -164,6 +173,7 @@ struct toshiba_acpi_dev {
164 int kbd_type; 173 int kbd_type;
165 int kbd_mode; 174 int kbd_mode;
166 int kbd_time; 175 int kbd_time;
176 int usbsc_bat_level;
167 177
168 unsigned int illumination_supported:1; 178 unsigned int illumination_supported:1;
169 unsigned int video_supported:1; 179 unsigned int video_supported:1;
@@ -177,6 +187,12 @@ struct toshiba_acpi_dev {
177 unsigned int touchpad_supported:1; 187 unsigned int touchpad_supported:1;
178 unsigned int eco_supported:1; 188 unsigned int eco_supported:1;
179 unsigned int accelerometer_supported:1; 189 unsigned int accelerometer_supported:1;
190 unsigned int usb_sleep_charge_supported:1;
191 unsigned int usb_rapid_charge_supported:1;
192 unsigned int usb_sleep_music_supported:1;
193 unsigned int kbd_function_keys_supported:1;
194 unsigned int panel_power_on_supported:1;
195 unsigned int usb_three_supported:1;
180 unsigned int sysfs_created:1; 196 unsigned int sysfs_created:1;
181 197
182 struct mutex mutex; 198 struct mutex mutex;
@@ -264,15 +280,17 @@ static const struct key_entry toshiba_acpi_alt_keymap[] = {
264 { KE_END, 0 }, 280 { KE_END, 0 },
265}; 281};
266 282
267/* utility 283/*
284 * Utility
268 */ 285 */
269 286
270static __inline__ void _set_bit(u32 * word, u32 mask, int value) 287static inline void _set_bit(u32 *word, u32 mask, int value)
271{ 288{
272 *word = (*word & ~mask) | (mask * value); 289 *word = (*word & ~mask) | (mask * value);
273} 290}
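The update is branch-free because multiplying a single-bit mask by a 0/1 value yields either 0 or the mask itself. A standalone sketch (the bit assignments are illustrative):

#include <stdint.h>
#include <stdio.h>

static void set_bit_by_mask(uint32_t *word, uint32_t mask, int value)
{
	/* clear the masked bit, then OR it back in "value" times */
	*word = (*word & ~mask) | (mask * value);
}

int main(void)
{
	uint32_t video = 0x5;			/* e.g. LCD and TV bits set */

	set_bit_by_mask(&video, 0x4, 0);	/* clear the TV bit */
	set_bit_by_mask(&video, 0x2, 1);	/* set the CRT bit */
	printf("0x%x\n", video);		/* prints 0x3 */
	return 0;
}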
274 291
275/* acpi interface wrappers 292/*
293 * ACPI interface wrappers
276 */ 294 */
277 295
278static int write_acpi_int(const char *methodName, int val) 296static int write_acpi_int(const char *methodName, int val)
@@ -283,7 +301,8 @@ static int write_acpi_int(const char *methodName, int val)
283 return (status == AE_OK) ? 0 : -EIO; 301 return (status == AE_OK) ? 0 : -EIO;
284} 302}
285 303
286/* Perform a raw configuration call. Here we don't care about input or output 304/*
305 * Perform a raw configuration call. Here we don't care about input or output
287 * buffer format. 306 * buffer format.
288 */ 307 */
289static acpi_status tci_raw(struct toshiba_acpi_dev *dev, 308static acpi_status tci_raw(struct toshiba_acpi_dev *dev,
@@ -310,15 +329,15 @@ static acpi_status tci_raw(struct toshiba_acpi_dev *dev,
310 (char *)dev->method_hci, &params, 329 (char *)dev->method_hci, &params,
311 &results); 330 &results);
312 if ((status == AE_OK) && (out_objs->package.count <= TCI_WORDS)) { 331 if ((status == AE_OK) && (out_objs->package.count <= TCI_WORDS)) {
313 for (i = 0; i < out_objs->package.count; ++i) { 332 for (i = 0; i < out_objs->package.count; ++i)
314 out[i] = out_objs->package.elements[i].integer.value; 333 out[i] = out_objs->package.elements[i].integer.value;
315 }
316 } 334 }
317 335
318 return status; 336 return status;
319} 337}
320 338
321/* common hci tasks (get or set one or two value) 339/*
340 * Common hci tasks (get or set one or two value)
322 * 341 *
323 * In addition to the ACPI status, the HCI system returns a result which 342 * In addition to the ACPI status, the HCI system returns a result which
324 * may be useful (such as "not supported"). 343 * may be useful (such as "not supported").
@@ -338,6 +357,7 @@ static u32 hci_read1(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1)
338 u32 in[TCI_WORDS] = { HCI_GET, reg, 0, 0, 0, 0 }; 357 u32 in[TCI_WORDS] = { HCI_GET, reg, 0, 0, 0, 0 };
339 u32 out[TCI_WORDS]; 358 u32 out[TCI_WORDS];
340 acpi_status status = tci_raw(dev, in, out); 359 acpi_status status = tci_raw(dev, in, out);
360
341 if (ACPI_FAILURE(status)) 361 if (ACPI_FAILURE(status))
342 return TOS_FAILURE; 362 return TOS_FAILURE;
343 363
@@ -355,11 +375,13 @@ static u32 hci_write2(struct toshiba_acpi_dev *dev, u32 reg, u32 in1, u32 in2)
355 return ACPI_SUCCESS(status) ? out[0] : TOS_FAILURE; 375 return ACPI_SUCCESS(status) ? out[0] : TOS_FAILURE;
356} 376}
357 377
358static u32 hci_read2(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1, u32 *out2) 378static u32 hci_read2(struct toshiba_acpi_dev *dev,
379 u32 reg, u32 *out1, u32 *out2)
359{ 380{
360 u32 in[TCI_WORDS] = { HCI_GET, reg, *out1, *out2, 0, 0 }; 381 u32 in[TCI_WORDS] = { HCI_GET, reg, *out1, *out2, 0, 0 };
361 u32 out[TCI_WORDS]; 382 u32 out[TCI_WORDS];
362 acpi_status status = tci_raw(dev, in, out); 383 acpi_status status = tci_raw(dev, in, out);
384
363 if (ACPI_FAILURE(status)) 385 if (ACPI_FAILURE(status))
364 return TOS_FAILURE; 386 return TOS_FAILURE;
365 387
@@ -369,7 +391,8 @@ static u32 hci_read2(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1, u32 *out2
369 return out[0]; 391 return out[0];
370} 392}
371 393
372/* common sci tasks 394/*
395 * Common sci tasks
373 */ 396 */
374 397
375static int sci_open(struct toshiba_acpi_dev *dev) 398static int sci_open(struct toshiba_acpi_dev *dev)
@@ -389,6 +412,20 @@ static int sci_open(struct toshiba_acpi_dev *dev)
389 } else if (out[0] == TOS_ALREADY_OPEN) { 412 } else if (out[0] == TOS_ALREADY_OPEN) {
390 pr_info("Toshiba SCI already opened\n"); 413 pr_info("Toshiba SCI already opened\n");
391 return 1; 414 return 1;
415 } else if (out[0] == TOS_NOT_SUPPORTED) {
416 /*
417 * Some BIOSes do not have the SCI open/close functions
418 * implemented and return 0x8000 (Not Supported), failing to
419 * register some supported features.
420 *
 421 * Simply return 1 on those affected laptops so that their
 422 * supported features keep working.
423 *
424 * In the case that some laptops really do not support the SCI,
 425 * all the SCI-dependent functions check for TOS_NOT_SUPPORTED
 426 * and thus do not register support for the queried feature.
427 */
428 return 1;
392 } else if (out[0] == TOS_NOT_PRESENT) { 429 } else if (out[0] == TOS_NOT_PRESENT) {
393 pr_info("Toshiba SCI is not present\n"); 430 pr_info("Toshiba SCI is not present\n");
394 } 431 }
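A compact model of the resulting open logic. Of the reply codes below, only TOS_NOT_SUPPORTED (0x8000) is documented in the hunk above; the other names appear in the driver, but their numeric values here are assumptions for illustration:

#include <stdio.h>

enum { TOS_OPEN_CLOSE_OK = 0x0044, TOS_ALREADY_OPEN = 0x0081,
       TOS_NOT_SUPPORTED = 0x8000, TOS_NOT_PRESENT = 0x8600 };

static int model_sci_open(unsigned int reply)
{
	switch (reply) {
	case TOS_OPEN_CLOSE_OK:		/* normal success */
	case TOS_ALREADY_OPEN:		/* harmless, treat as open */
	case TOS_NOT_SUPPORTED:		/* buggy BIOS: pretend it worked */
		return 1;
	default:			/* not present, or a real failure */
		return 0;
	}
}

int main(void)
{
	printf("%d %d\n", model_sci_open(TOS_NOT_SUPPORTED),
	       model_sci_open(TOS_NOT_PRESENT));	/* prints 1 0 */
	return 0;
}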
@@ -421,6 +458,7 @@ static u32 sci_read(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1)
421 u32 in[TCI_WORDS] = { SCI_GET, reg, 0, 0, 0, 0 }; 458 u32 in[TCI_WORDS] = { SCI_GET, reg, 0, 0, 0, 0 };
422 u32 out[TCI_WORDS]; 459 u32 out[TCI_WORDS];
423 acpi_status status = tci_raw(dev, in, out); 460 acpi_status status = tci_raw(dev, in, out);
461
424 if (ACPI_FAILURE(status)) 462 if (ACPI_FAILURE(status))
425 return TOS_FAILURE; 463 return TOS_FAILURE;
426 464
@@ -529,10 +567,11 @@ static int toshiba_kbd_illum_available(struct toshiba_acpi_dev *dev)
529 return 0; 567 return 0;
530 } 568 }
531 569
532 /* Check for keyboard backlight timeout max value, 570 /*
571 * Check for keyboard backlight timeout max value,
533 * previous kbd backlight implementation set this to 572 * previous kbd backlight implementation set this to
534 * 0x3c0003, and now the new implementation set this 573 * 0x3c0003, and now the new implementation set this
535 * to 0x3c001a, use this to distinguish between them 574 * to 0x3c001a, use this to distinguish between them.
536 */ 575 */
537 if (out[3] == SCI_KBD_TIME_MAX) 576 if (out[3] == SCI_KBD_TIME_MAX)
538 dev->kbd_type = 2; 577 dev->kbd_type = 2;
@@ -667,19 +706,37 @@ static int toshiba_touchpad_get(struct toshiba_acpi_dev *dev, u32 *state)
667static int toshiba_eco_mode_available(struct toshiba_acpi_dev *dev) 706static int toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
668{ 707{
669 acpi_status status; 708 acpi_status status;
670 u32 in[TCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 1, 0, 0 }; 709 u32 in[TCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 0, 0, 0 };
671 u32 out[TCI_WORDS]; 710 u32 out[TCI_WORDS];
672 711
673 status = tci_raw(dev, in, out); 712 status = tci_raw(dev, in, out);
674 if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) { 713 if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
675 pr_info("ACPI call to get ECO led failed\n"); 714 pr_err("ACPI call to get ECO led failed\n");
676 return 0; 715 } else if (out[0] == TOS_NOT_INSTALLED) {
 716 pr_info("ECO led not installed\n");
717 } else if (out[0] == TOS_INPUT_DATA_ERROR) {
718 /*
719 * If we receive 0x8300 (Input Data Error), it means that the
 720 * LED device is present, but that we passed the wrong input
721 * parameters.
722 *
 723 * Let's query the LED status again: a success response
 724 * confirms the actual presence of the LED; bail out
 725 * otherwise.
726 */
727 in[3] = 1;
728 status = tci_raw(dev, in, out);
729 if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE)
730 pr_err("ACPI call to get ECO led failed\n");
731 else if (out[0] == TOS_SUCCESS)
732 return 1;
677 } 733 }
678 734
679 return 1; 735 return 0;
680} 736}
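A userspace model of the two-step probe: TOS_NOT_INSTALLED (0x8e00) and TOS_INPUT_DATA_ERROR (0x8300) match the values given in this patch, while TOS_SUCCESS == 0 is an assumption for illustration:

#include <stdio.h>

enum { TOS_SUCCESS = 0x0000, TOS_INPUT_DATA_ERROR = 0x8300,
       TOS_NOT_INSTALLED = 0x8e00 };

/* first_reply: answer to the query with in[3] == 0
 * retry_reply: answer to the follow-up query with in[3] == 1 */
static int eco_led_present(unsigned int first_reply,
			   unsigned int retry_reply)
{
	if (first_reply == TOS_NOT_INSTALLED)
		return 0;			/* no LED at all */
	if (first_reply == TOS_INPUT_DATA_ERROR)
		return retry_reply == TOS_SUCCESS; /* LED, bad args */
	return 0;				/* anything else: bail */
}

int main(void)
{
	printf("%d\n",
	       eco_led_present(TOS_INPUT_DATA_ERROR, TOS_SUCCESS)); /* 1 */
	return 0;
}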
681 737
682static enum led_brightness toshiba_eco_mode_get_status(struct led_classdev *cdev) 738static enum led_brightness
739toshiba_eco_mode_get_status(struct led_classdev *cdev)
683{ 740{
684 struct toshiba_acpi_dev *dev = container_of(cdev, 741 struct toshiba_acpi_dev *dev = container_of(cdev,
685 struct toshiba_acpi_dev, eco_led); 742 struct toshiba_acpi_dev, eco_led);
@@ -721,7 +778,8 @@ static int toshiba_accelerometer_supported(struct toshiba_acpi_dev *dev)
721 u32 out[TCI_WORDS]; 778 u32 out[TCI_WORDS];
722 acpi_status status; 779 acpi_status status;
723 780
724 /* Check if the accelerometer call exists, 781 /*
782 * Check if the accelerometer call exists,
725 * this call also serves as initialization 783 * this call also serves as initialization
726 */ 784 */
727 status = tci_raw(dev, in, out); 785 status = tci_raw(dev, in, out);
@@ -760,6 +818,337 @@ static int toshiba_accelerometer_get(struct toshiba_acpi_dev *dev,
760 return 0; 818 return 0;
761} 819}
762 820
821/* Sleep (Charge and Music) utilities support */
822static int toshiba_usb_sleep_charge_get(struct toshiba_acpi_dev *dev,
823 u32 *mode)
824{
825 u32 result;
826
827 if (!sci_open(dev))
828 return -EIO;
829
830 result = sci_read(dev, SCI_USB_SLEEP_CHARGE, mode);
831 sci_close(dev);
832 if (result == TOS_FAILURE) {
 833 pr_err("ACPI call to get USB S&C mode failed\n");
834 return -EIO;
835 } else if (result == TOS_NOT_SUPPORTED) {
836 pr_info("USB Sleep and Charge not supported\n");
837 return -ENODEV;
838 } else if (result == TOS_INPUT_DATA_ERROR) {
839 return -EIO;
840 }
841
842 return 0;
843}
844
845static int toshiba_usb_sleep_charge_set(struct toshiba_acpi_dev *dev,
846 u32 mode)
847{
848 u32 result;
849
850 if (!sci_open(dev))
851 return -EIO;
852
853 result = sci_write(dev, SCI_USB_SLEEP_CHARGE, mode);
854 sci_close(dev);
855 if (result == TOS_FAILURE) {
856 pr_err("ACPI call to set USB S&C mode failed\n");
857 return -EIO;
858 } else if (result == TOS_NOT_SUPPORTED) {
859 pr_info("USB Sleep and Charge not supported\n");
860 return -ENODEV;
861 } else if (result == TOS_INPUT_DATA_ERROR) {
862 return -EIO;
863 }
864
865 return 0;
866}
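These new SCI helpers all fold the firmware reply into the same errno convention. A sketch of the shared mapping (TOS_INPUT_DATA_ERROR's 0x8300 is documented earlier in this patch; TOS_FAILURE's value is an assumption):

#include <errno.h>

enum { TOS_FAILURE = 0xfe00, TOS_NOT_SUPPORTED = 0x8000,
       TOS_INPUT_DATA_ERROR = 0x8300 };

static int tos_to_errno(unsigned int result)
{
	if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR)
		return -EIO;		/* call made, but it failed */
	if (result == TOS_NOT_SUPPORTED)
		return -ENODEV;		/* feature absent on this model */
	return 0;			/* success */
}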
867
868static int toshiba_sleep_functions_status_get(struct toshiba_acpi_dev *dev,
869 u32 *mode)
870{
871 u32 in[TCI_WORDS] = { SCI_GET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
872 u32 out[TCI_WORDS];
873 acpi_status status;
874
875 if (!sci_open(dev))
876 return -EIO;
877
878 in[5] = SCI_USB_CHARGE_BAT_LVL;
879 status = tci_raw(dev, in, out);
880 sci_close(dev);
881 if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
882 pr_err("ACPI call to get USB S&C battery level failed\n");
883 return -EIO;
884 } else if (out[0] == TOS_NOT_SUPPORTED) {
885 pr_info("USB Sleep and Charge not supported\n");
886 return -ENODEV;
887 } else if (out[0] == TOS_INPUT_DATA_ERROR) {
888 return -EIO;
889 }
890
891 *mode = out[2];
892
893 return 0;
894}
895
896static int toshiba_sleep_functions_status_set(struct toshiba_acpi_dev *dev,
897 u32 mode)
898{
899 u32 in[TCI_WORDS] = { SCI_SET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
900 u32 out[TCI_WORDS];
901 acpi_status status;
902
903 if (!sci_open(dev))
904 return -EIO;
905
906 in[2] = mode;
907 in[5] = SCI_USB_CHARGE_BAT_LVL;
908 status = tci_raw(dev, in, out);
909 sci_close(dev);
910 if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
911 pr_err("ACPI call to set USB S&C battery level failed\n");
912 return -EIO;
913 } else if (out[0] == TOS_NOT_SUPPORTED) {
914 pr_info("USB Sleep and Charge not supported\n");
915 return -ENODEV;
916 } else if (out[0] == TOS_INPUT_DATA_ERROR) {
917 return -EIO;
918 }
919
920 return 0;
921}
922
923static int toshiba_usb_rapid_charge_get(struct toshiba_acpi_dev *dev,
924 u32 *state)
925{
926 u32 in[TCI_WORDS] = { SCI_GET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
927 u32 out[TCI_WORDS];
928 acpi_status status;
929
930 if (!sci_open(dev))
931 return -EIO;
932
933 in[5] = SCI_USB_CHARGE_RAPID_DSP;
934 status = tci_raw(dev, in, out);
935 sci_close(dev);
936 if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
 937 pr_err("ACPI call to get USB Rapid Charge failed\n");
938 return -EIO;
939 } else if (out[0] == TOS_NOT_SUPPORTED ||
940 out[0] == TOS_INPUT_DATA_ERROR) {
941 pr_info("USB Sleep and Charge not supported\n");
942 return -ENODEV;
943 }
944
945 *state = out[2];
946
947 return 0;
948}
949
950static int toshiba_usb_rapid_charge_set(struct toshiba_acpi_dev *dev,
951 u32 state)
952{
953 u32 in[TCI_WORDS] = { SCI_SET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
954 u32 out[TCI_WORDS];
955 acpi_status status;
956
957 if (!sci_open(dev))
958 return -EIO;
959
960 in[2] = state;
961 in[5] = SCI_USB_CHARGE_RAPID_DSP;
962 status = tci_raw(dev, in, out);
963 sci_close(dev);
964 if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
 965 pr_err("ACPI call to set USB Rapid Charge failed\n");
966 return -EIO;
967 } else if (out[0] == TOS_NOT_SUPPORTED) {
968 pr_info("USB Sleep and Charge not supported\n");
969 return -ENODEV;
970 } else if (out[0] == TOS_INPUT_DATA_ERROR) {
971 return -EIO;
972 }
973
974 return 0;
975}
976
977static int toshiba_usb_sleep_music_get(struct toshiba_acpi_dev *dev, u32 *state)
978{
979 u32 result;
980
981 if (!sci_open(dev))
982 return -EIO;
983
984 result = sci_read(dev, SCI_USB_SLEEP_MUSIC, state);
985 sci_close(dev);
986 if (result == TOS_FAILURE) {
 987 pr_err("ACPI call to get USB Sleep and Music failed\n");
988 return -EIO;
989 } else if (result == TOS_NOT_SUPPORTED) {
990 pr_info("USB Sleep and Charge not supported\n");
991 return -ENODEV;
992 } else if (result == TOS_INPUT_DATA_ERROR) {
993 return -EIO;
994 }
995
996 return 0;
997}
998
999static int toshiba_usb_sleep_music_set(struct toshiba_acpi_dev *dev, u32 state)
1000{
1001 u32 result;
1002
1003 if (!sci_open(dev))
1004 return -EIO;
1005
1006 result = sci_write(dev, SCI_USB_SLEEP_MUSIC, state);
1007 sci_close(dev);
1008 if (result == TOS_FAILURE) {
 1009 pr_err("ACPI call to set USB Sleep and Music failed\n");
1010 return -EIO;
1011 } else if (result == TOS_NOT_SUPPORTED) {
1012 pr_info("USB Sleep and Charge not supported\n");
1013 return -ENODEV;
1014 } else if (result == TOS_INPUT_DATA_ERROR) {
1015 return -EIO;
1016 }
1017
1018 return 0;
1019}
1020
1021/* Keyboard function keys */
1022static int toshiba_function_keys_get(struct toshiba_acpi_dev *dev, u32 *mode)
1023{
1024 u32 result;
1025
1026 if (!sci_open(dev))
1027 return -EIO;
1028
1029 result = sci_read(dev, SCI_KBD_FUNCTION_KEYS, mode);
1030 sci_close(dev);
1031 if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
1032 pr_err("ACPI call to get KBD function keys failed\n");
1033 return -EIO;
1034 } else if (result == TOS_NOT_SUPPORTED) {
1035 pr_info("KBD function keys not supported\n");
1036 return -ENODEV;
1037 }
1038
1039 return 0;
1040}
1041
1042static int toshiba_function_keys_set(struct toshiba_acpi_dev *dev, u32 mode)
1043{
1044 u32 result;
1045
1046 if (!sci_open(dev))
1047 return -EIO;
1048
1049 result = sci_write(dev, SCI_KBD_FUNCTION_KEYS, mode);
1050 sci_close(dev);
1051 if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
1052 pr_err("ACPI call to set KBD function keys failed\n");
1053 return -EIO;
1054 } else if (result == TOS_NOT_SUPPORTED) {
1055 pr_info("KBD function keys not supported\n");
1056 return -ENODEV;
1057 }
1058
1059 return 0;
1060}
1061
1062/* Panel Power ON */
1063static int toshiba_panel_power_on_get(struct toshiba_acpi_dev *dev, u32 *state)
1064{
1065 u32 result;
1066
1067 if (!sci_open(dev))
1068 return -EIO;
1069
1070 result = sci_read(dev, SCI_PANEL_POWER_ON, state);
1071 sci_close(dev);
1072 if (result == TOS_FAILURE) {
1073 pr_err("ACPI call to get Panel Power ON failed\n");
1074 return -EIO;
1075 } else if (result == TOS_NOT_SUPPORTED) {
 1076 pr_info("Panel Power ON not supported\n");
1077 return -ENODEV;
1078 } else if (result == TOS_INPUT_DATA_ERROR) {
1079 return -EIO;
1080 }
1081
1082 return 0;
1083}
1084
1085static int toshiba_panel_power_on_set(struct toshiba_acpi_dev *dev, u32 state)
1086{
1087 u32 result;
1088
1089 if (!sci_open(dev))
1090 return -EIO;
1091
1092 result = sci_write(dev, SCI_PANEL_POWER_ON, state);
1093 sci_close(dev);
1094 if (result == TOS_FAILURE) {
1095 pr_err("ACPI call to set Panel Power ON failed\n");
1096 return -EIO;
1097 } else if (result == TOS_NOT_SUPPORTED) {
1098 pr_info("Panel Power ON not supported\n");
1099 return -ENODEV;
1100 } else if (result == TOS_INPUT_DATA_ERROR) {
1101 return -EIO;
1102 }
1103
1104 return 0;
1105}
1106
1107/* USB Three */
1108static int toshiba_usb_three_get(struct toshiba_acpi_dev *dev, u32 *state)
1109{
1110 u32 result;
1111
1112 if (!sci_open(dev))
1113 return -EIO;
1114
1115 result = sci_read(dev, SCI_USB_THREE, state);
1116 sci_close(dev);
1117 if (result == TOS_FAILURE) {
1118 pr_err("ACPI call to get USB 3 failed\n");
1119 return -EIO;
1120 } else if (result == TOS_NOT_SUPPORTED) {
1121 pr_info("USB 3 not supported\n");
1122 return -ENODEV;
1123 } else if (result == TOS_INPUT_DATA_ERROR) {
1124 return -EIO;
1125 }
1126
1127 return 0;
1128}
1129
1130static int toshiba_usb_three_set(struct toshiba_acpi_dev *dev, u32 state)
1131{
1132 u32 result;
1133
1134 if (!sci_open(dev))
1135 return -EIO;
1136
1137 result = sci_write(dev, SCI_USB_THREE, state);
1138 sci_close(dev);
1139 if (result == TOS_FAILURE) {
1140 pr_err("ACPI call to set USB 3 failed\n");
1141 return -EIO;
1142 } else if (result == TOS_NOT_SUPPORTED) {
1143 pr_info("USB 3 not supported\n");
1144 return -ENODEV;
1145 } else if (result == TOS_INPUT_DATA_ERROR) {
1146 return -EIO;
1147 }
1148
1149 return 0;
1150}
1151
763/* Bluetooth rfkill handlers */ 1152/* Bluetooth rfkill handlers */
764 1153
765static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present) 1154static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present)
@@ -870,7 +1259,7 @@ static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable)
870 return hci_result == TOS_SUCCESS ? 0 : -EIO; 1259 return hci_result == TOS_SUCCESS ? 0 : -EIO;
871} 1260}
872 1261
873static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ; 1262static struct proc_dir_entry *toshiba_proc_dir /*= 0*/;
874 1263
875static int __get_lcd_brightness(struct toshiba_acpi_dev *dev) 1264static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
876{ 1265{
@@ -881,6 +1270,7 @@ static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
881 if (dev->tr_backlight_supported) { 1270 if (dev->tr_backlight_supported) {
882 bool enabled; 1271 bool enabled;
883 int ret = get_tr_backlight_status(dev, &enabled); 1272 int ret = get_tr_backlight_status(dev, &enabled);
1273
884 if (ret) 1274 if (ret)
885 return ret; 1275 return ret;
886 if (enabled) 1276 if (enabled)
@@ -898,6 +1288,7 @@ static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
898static int get_lcd_brightness(struct backlight_device *bd) 1288static int get_lcd_brightness(struct backlight_device *bd)
899{ 1289{
900 struct toshiba_acpi_dev *dev = bl_get_data(bd); 1290 struct toshiba_acpi_dev *dev = bl_get_data(bd);
1291
901 return __get_lcd_brightness(dev); 1292 return __get_lcd_brightness(dev);
902} 1293}
903 1294
@@ -934,6 +1325,7 @@ static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
934 if (dev->tr_backlight_supported) { 1325 if (dev->tr_backlight_supported) {
935 bool enable = !value; 1326 bool enable = !value;
936 int ret = set_tr_backlight_status(dev, enable); 1327 int ret = set_tr_backlight_status(dev, enable);
1328
937 if (ret) 1329 if (ret)
938 return ret; 1330 return ret;
939 if (value) 1331 if (value)
@@ -948,6 +1340,7 @@ static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
948static int set_lcd_status(struct backlight_device *bd) 1340static int set_lcd_status(struct backlight_device *bd)
949{ 1341{
950 struct toshiba_acpi_dev *dev = bl_get_data(bd); 1342 struct toshiba_acpi_dev *dev = bl_get_data(bd);
1343
951 return set_lcd_brightness(dev, bd->props.brightness); 1344 return set_lcd_brightness(dev, bd->props.brightness);
952} 1345}
953 1346
@@ -1005,6 +1398,7 @@ static int video_proc_show(struct seq_file *m, void *v)
1005 int is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0; 1398 int is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0;
1006 int is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0; 1399 int is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0;
1007 int is_tv = (value & HCI_VIDEO_OUT_TV) ? 1 : 0; 1400 int is_tv = (value & HCI_VIDEO_OUT_TV) ? 1 : 0;
1401
1008 seq_printf(m, "lcd_out: %d\n", is_lcd); 1402 seq_printf(m, "lcd_out: %d\n", is_lcd);
1009 seq_printf(m, "crt_out: %d\n", is_crt); 1403 seq_printf(m, "crt_out: %d\n", is_crt);
1010 seq_printf(m, "tv_out: %d\n", is_tv); 1404 seq_printf(m, "tv_out: %d\n", is_tv);
@@ -1042,9 +1436,9 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
1042 1436
1043 buffer = cmd; 1437 buffer = cmd;
1044 1438
1045 /* scan expression. Multiple expressions may be delimited with ; 1439 /*
1046 * 1440 * Scan expression. Multiple expressions may be delimited with ;
1047 * NOTE: to keep scanning simple, invalid fields are ignored 1441 * NOTE: To keep scanning simple, invalid fields are ignored.
1048 */ 1442 */
1049 while (remain) { 1443 while (remain) {
1050 if (sscanf(buffer, " lcd_out : %i", &value) == 1) 1444 if (sscanf(buffer, " lcd_out : %i", &value) == 1)
@@ -1053,12 +1447,11 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
1053 crt_out = value & 1; 1447 crt_out = value & 1;
1054 else if (sscanf(buffer, " tv_out : %i", &value) == 1) 1448 else if (sscanf(buffer, " tv_out : %i", &value) == 1)
1055 tv_out = value & 1; 1449 tv_out = value & 1;
1056 /* advance to one character past the next ; */ 1450 /* Advance to one character past the next ; */
1057 do { 1451 do {
1058 ++buffer; 1452 ++buffer;
1059 --remain; 1453 --remain;
1060 } 1454 } while (remain && *(buffer - 1) != ';');
1061 while (remain && *(buffer - 1) != ';');
1062 } 1455 }
1063 1456
1064 kfree(cmd); 1457 kfree(cmd);
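The loop scans each ';'-delimited clause with sscanf and silently skips anything it does not recognize. A standalone model of the same scan (the buffer contents are an example):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char cmd[] = "lcd_out:1;tv_out:0;bogus:9;";
	const char *buffer = cmd;
	size_t remain = strlen(cmd);
	int lcd_out = -1, tv_out = -1, value;

	while (remain) {
		if (sscanf(buffer, " lcd_out : %i", &value) == 1)
			lcd_out = value & 1;
		else if (sscanf(buffer, " tv_out : %i", &value) == 1)
			tv_out = value & 1;
		/* unknown clauses ("bogus") are ignored */
		do {			/* advance past the next ';' */
			++buffer;
			--remain;
		} while (remain && *(buffer - 1) != ';');
	}
	printf("%d %d\n", lcd_out, tv_out);	/* prints 1 0 */
	return 0;
}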
@@ -1066,13 +1459,15 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
1066 ret = get_video_status(dev, &video_out); 1459 ret = get_video_status(dev, &video_out);
1067 if (!ret) { 1460 if (!ret) {
1068 unsigned int new_video_out = video_out; 1461 unsigned int new_video_out = video_out;
1462
1069 if (lcd_out != -1) 1463 if (lcd_out != -1)
1070 _set_bit(&new_video_out, HCI_VIDEO_OUT_LCD, lcd_out); 1464 _set_bit(&new_video_out, HCI_VIDEO_OUT_LCD, lcd_out);
1071 if (crt_out != -1) 1465 if (crt_out != -1)
1072 _set_bit(&new_video_out, HCI_VIDEO_OUT_CRT, crt_out); 1466 _set_bit(&new_video_out, HCI_VIDEO_OUT_CRT, crt_out);
1073 if (tv_out != -1) 1467 if (tv_out != -1)
1074 _set_bit(&new_video_out, HCI_VIDEO_OUT_TV, tv_out); 1468 _set_bit(&new_video_out, HCI_VIDEO_OUT_TV, tv_out);
1075 /* To avoid unnecessary video disruption, only write the new 1469 /*
1470 * To avoid unnecessary video disruption, only write the new
1076 * video setting if something changed. */ 1471 * video setting if something changed. */
1077 if (new_video_out != video_out) 1472 if (new_video_out != video_out)
1078 ret = write_acpi_int(METHOD_VIDEO_OUT, new_video_out); 1473 ret = write_acpi_int(METHOD_VIDEO_OUT, new_video_out);
@@ -1135,10 +1530,10 @@ static ssize_t fan_proc_write(struct file *file, const char __user *buf,
1135 if (sscanf(cmd, " force_on : %i", &value) == 1 && 1530 if (sscanf(cmd, " force_on : %i", &value) == 1 &&
1136 value >= 0 && value <= 1) { 1531 value >= 0 && value <= 1) {
1137 hci_result = hci_write1(dev, HCI_FAN, value); 1532 hci_result = hci_write1(dev, HCI_FAN, value);
1138 if (hci_result != TOS_SUCCESS) 1533 if (hci_result == TOS_SUCCESS)
1139 return -EIO;
1140 else
1141 dev->force_fan = value; 1534 dev->force_fan = value;
1535 else
1536 return -EIO;
1142 } else { 1537 } else {
1143 return -EINVAL; 1538 return -EINVAL;
1144 } 1539 }
@@ -1167,11 +1562,13 @@ static int keys_proc_show(struct seq_file *m, void *v)
1167 dev->key_event_valid = 1; 1562 dev->key_event_valid = 1;
1168 dev->last_key_event = value; 1563 dev->last_key_event = value;
1169 } else if (hci_result == TOS_FIFO_EMPTY) { 1564 } else if (hci_result == TOS_FIFO_EMPTY) {
1170 /* better luck next time */ 1565 /* Better luck next time */
1171 } else if (hci_result == TOS_NOT_SUPPORTED) { 1566 } else if (hci_result == TOS_NOT_SUPPORTED) {
1172 /* This is a workaround for an unresolved issue on 1567 /*
1568 * This is a workaround for an unresolved issue on
1173 * some machines where system events sporadically 1569 * some machines where system events sporadically
1174 * become disabled. */ 1570 * become disabled.
1571 */
1175 hci_result = hci_write1(dev, HCI_SYSTEM_EVENT, 1); 1572 hci_result = hci_write1(dev, HCI_SYSTEM_EVENT, 1);
1176 pr_notice("Re-enabled hotkeys\n"); 1573 pr_notice("Re-enabled hotkeys\n");
1177 } else { 1574 } else {
@@ -1203,11 +1600,10 @@ static ssize_t keys_proc_write(struct file *file, const char __user *buf,
1203 return -EFAULT; 1600 return -EFAULT;
1204 cmd[len] = '\0'; 1601 cmd[len] = '\0';
1205 1602
1206 if (sscanf(cmd, " hotkey_ready : %i", &value) == 1 && value == 0) { 1603 if (sscanf(cmd, " hotkey_ready : %i", &value) == 1 && value == 0)
1207 dev->key_event_valid = 0; 1604 dev->key_event_valid = 0;
1208 } else { 1605 else
1209 return -EINVAL; 1606 return -EINVAL;
1210 }
1211 1607
1212 return count; 1608 return count;
1213} 1609}
@@ -1241,7 +1637,8 @@ static const struct file_operations version_proc_fops = {
1241 .release = single_release, 1637 .release = single_release,
1242}; 1638};
1243 1639
1244/* proc and module init 1640/*
1641 * Proc and module init
1245 */ 1642 */
1246 1643
1247#define PROC_TOSHIBA "toshiba" 1644#define PROC_TOSHIBA "toshiba"
@@ -1286,66 +1683,56 @@ static const struct backlight_ops toshiba_backlight_data = {
1286/* 1683/*
1287 * Sysfs files 1684 * Sysfs files
1288 */ 1685 */
1289static ssize_t toshiba_kbd_bl_mode_store(struct device *dev, 1686static ssize_t version_show(struct device *dev,
1290 struct device_attribute *attr, 1687 struct device_attribute *attr, char *buf)
1291 const char *buf, size_t count); 1688{
1292static ssize_t toshiba_kbd_bl_mode_show(struct device *dev, 1689 return sprintf(buf, "%s\n", TOSHIBA_ACPI_VERSION);
1293 struct device_attribute *attr, 1690}
1294 char *buf); 1691static DEVICE_ATTR_RO(version);
1295static ssize_t toshiba_kbd_type_show(struct device *dev,
1296 struct device_attribute *attr,
1297 char *buf);
1298static ssize_t toshiba_available_kbd_modes_show(struct device *dev,
1299 struct device_attribute *attr,
1300 char *buf);
1301static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev,
1302 struct device_attribute *attr,
1303 const char *buf, size_t count);
1304static ssize_t toshiba_kbd_bl_timeout_show(struct device *dev,
1305 struct device_attribute *attr,
1306 char *buf);
1307static ssize_t toshiba_touchpad_store(struct device *dev,
1308 struct device_attribute *attr,
1309 const char *buf, size_t count);
1310static ssize_t toshiba_touchpad_show(struct device *dev,
1311 struct device_attribute *attr,
1312 char *buf);
1313static ssize_t toshiba_position_show(struct device *dev,
1314 struct device_attribute *attr,
1315 char *buf);
1316
1317static DEVICE_ATTR(kbd_backlight_mode, S_IRUGO | S_IWUSR,
1318 toshiba_kbd_bl_mode_show, toshiba_kbd_bl_mode_store);
1319static DEVICE_ATTR(kbd_type, S_IRUGO, toshiba_kbd_type_show, NULL);
1320static DEVICE_ATTR(available_kbd_modes, S_IRUGO,
1321 toshiba_available_kbd_modes_show, NULL);
1322static DEVICE_ATTR(kbd_backlight_timeout, S_IRUGO | S_IWUSR,
1323 toshiba_kbd_bl_timeout_show, toshiba_kbd_bl_timeout_store);
1324static DEVICE_ATTR(touchpad, S_IRUGO | S_IWUSR,
1325 toshiba_touchpad_show, toshiba_touchpad_store);
1326static DEVICE_ATTR(position, S_IRUGO, toshiba_position_show, NULL);
1327 1692
1328static struct attribute *toshiba_attributes[] = { 1693static ssize_t fan_store(struct device *dev,
1329 &dev_attr_kbd_backlight_mode.attr, 1694 struct device_attribute *attr,
1330 &dev_attr_kbd_type.attr, 1695 const char *buf, size_t count)
1331 &dev_attr_available_kbd_modes.attr, 1696{
1332 &dev_attr_kbd_backlight_timeout.attr, 1697 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1333 &dev_attr_touchpad.attr, 1698 u32 result;
1334 &dev_attr_position.attr, 1699 int state;
1335 NULL, 1700 int ret;
1336};
1337 1701
1338static umode_t toshiba_sysfs_is_visible(struct kobject *, 1702 ret = kstrtoint(buf, 0, &state);
1339 struct attribute *, int); 1703 if (ret)
1704 return ret;
1340 1705
1341static struct attribute_group toshiba_attr_group = { 1706 if (state != 0 && state != 1)
1342 .is_visible = toshiba_sysfs_is_visible, 1707 return -EINVAL;
1343 .attrs = toshiba_attributes,
1344};
1345 1708
1346static ssize_t toshiba_kbd_bl_mode_store(struct device *dev, 1709 result = hci_write1(toshiba, HCI_FAN, state);
1347 struct device_attribute *attr, 1710 if (result == TOS_FAILURE)
1348 const char *buf, size_t count) 1711 return -EIO;
1712 else if (result == TOS_NOT_SUPPORTED)
1713 return -ENODEV;
1714
1715 return count;
1716}
1717
1718static ssize_t fan_show(struct device *dev,
1719 struct device_attribute *attr, char *buf)
1720{
1721 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1722 u32 value;
1723 int ret;
1724
1725 ret = get_fan_status(toshiba, &value);
1726 if (ret)
1727 return ret;
1728
1729 return sprintf(buf, "%d\n", value);
1730}
1731static DEVICE_ATTR_RW(fan);
1732
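The rename from toshiba_*_show/_store to bare <attr>_show/_store is what makes DEVICE_ATTR_RW() possible: the macro derives both callback names from the attribute name. Roughly (an illustrative expansion, not the exact macro text):

static struct device_attribute dev_attr_fan =
	__ATTR(fan, 0644, fan_show, fan_store);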
1733static ssize_t kbd_backlight_mode_store(struct device *dev,
1734 struct device_attribute *attr,
1735 const char *buf, size_t count)
1349{ 1736{
1350 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); 1737 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1351 int mode; 1738 int mode;
@@ -1369,7 +1756,8 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
1369 return -EINVAL; 1756 return -EINVAL;
1370 } 1757 }
1371 1758
1372 /* Set the Keyboard Backlight Mode where: 1759 /*
1760 * Set the Keyboard Backlight Mode where:
1373 * Auto - KBD backlight turns off automatically in given time 1761 * Auto - KBD backlight turns off automatically in given time
1374 * FN-Z - KBD backlight "toggles" when hotkey pressed 1762 * FN-Z - KBD backlight "toggles" when hotkey pressed
1375 * ON - KBD backlight is always on 1763 * ON - KBD backlight is always on
@@ -1400,9 +1788,9 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
1400 return count; 1788 return count;
1401} 1789}
1402 1790
1403static ssize_t toshiba_kbd_bl_mode_show(struct device *dev, 1791static ssize_t kbd_backlight_mode_show(struct device *dev,
1404 struct device_attribute *attr, 1792 struct device_attribute *attr,
1405 char *buf) 1793 char *buf)
1406{ 1794{
1407 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); 1795 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1408 u32 time; 1796 u32 time;
@@ -1412,19 +1800,20 @@ static ssize_t toshiba_kbd_bl_mode_show(struct device *dev,
1412 1800
1413 return sprintf(buf, "%i\n", time & SCI_KBD_MODE_MASK); 1801 return sprintf(buf, "%i\n", time & SCI_KBD_MODE_MASK);
1414} 1802}
1803static DEVICE_ATTR_RW(kbd_backlight_mode);
1415 1804
1416static ssize_t toshiba_kbd_type_show(struct device *dev, 1805static ssize_t kbd_type_show(struct device *dev,
1417 struct device_attribute *attr, 1806 struct device_attribute *attr, char *buf)
1418 char *buf)
1419{ 1807{
1420 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); 1808 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1421 1809
1422 return sprintf(buf, "%d\n", toshiba->kbd_type); 1810 return sprintf(buf, "%d\n", toshiba->kbd_type);
1423} 1811}
1812static DEVICE_ATTR_RO(kbd_type);
1424 1813
1425static ssize_t toshiba_available_kbd_modes_show(struct device *dev, 1814static ssize_t available_kbd_modes_show(struct device *dev,
1426 struct device_attribute *attr, 1815 struct device_attribute *attr,
1427 char *buf) 1816 char *buf)
1428{ 1817{
1429 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); 1818 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1430 1819
@@ -1435,10 +1824,11 @@ static ssize_t toshiba_available_kbd_modes_show(struct device *dev,
1435 return sprintf(buf, "%x %x %x\n", 1824 return sprintf(buf, "%x %x %x\n",
1436 SCI_KBD_MODE_AUTO, SCI_KBD_MODE_ON, SCI_KBD_MODE_OFF); 1825 SCI_KBD_MODE_AUTO, SCI_KBD_MODE_ON, SCI_KBD_MODE_OFF);
1437} 1826}
1827static DEVICE_ATTR_RO(available_kbd_modes);
1438 1828
1439static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev, 1829static ssize_t kbd_backlight_timeout_store(struct device *dev,
1440 struct device_attribute *attr, 1830 struct device_attribute *attr,
1441 const char *buf, size_t count) 1831 const char *buf, size_t count)
1442{ 1832{
1443 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); 1833 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1444 int time; 1834 int time;
@@ -1479,9 +1869,9 @@ static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev,
1479 return count; 1869 return count;
1480} 1870}
1481 1871
1482static ssize_t toshiba_kbd_bl_timeout_show(struct device *dev, 1872static ssize_t kbd_backlight_timeout_show(struct device *dev,
1483 struct device_attribute *attr, 1873 struct device_attribute *attr,
1484 char *buf) 1874 char *buf)
1485{ 1875{
1486 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); 1876 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1487 u32 time; 1877 u32 time;
@@ -1491,10 +1881,11 @@ static ssize_t toshiba_kbd_bl_timeout_show(struct device *dev,
1491 1881
1492 return sprintf(buf, "%i\n", time >> HCI_MISC_SHIFT); 1882 return sprintf(buf, "%i\n", time >> HCI_MISC_SHIFT);
1493} 1883}
1884static DEVICE_ATTR_RW(kbd_backlight_timeout);
1494 1885
1495static ssize_t toshiba_touchpad_store(struct device *dev, 1886static ssize_t touchpad_store(struct device *dev,
1496 struct device_attribute *attr, 1887 struct device_attribute *attr,
1497 const char *buf, size_t count) 1888 const char *buf, size_t count)
1498{ 1889{
1499 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); 1890 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1500 int state; 1891 int state;
@@ -1514,8 +1905,8 @@ static ssize_t toshiba_touchpad_store(struct device *dev,
1514 return count; 1905 return count;
1515} 1906}
1516 1907
1517static ssize_t toshiba_touchpad_show(struct device *dev, 1908static ssize_t touchpad_show(struct device *dev,
1518 struct device_attribute *attr, char *buf) 1909 struct device_attribute *attr, char *buf)
1519{ 1910{
1520 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); 1911 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1521 u32 state; 1912 u32 state;
@@ -1527,9 +1918,10 @@ static ssize_t toshiba_touchpad_show(struct device *dev,
1527 1918
1528 return sprintf(buf, "%i\n", state); 1919 return sprintf(buf, "%i\n", state);
1529} 1920}
1921static DEVICE_ATTR_RW(touchpad);
1530 1922
1531static ssize_t toshiba_position_show(struct device *dev, 1923static ssize_t position_show(struct device *dev,
1532 struct device_attribute *attr, char *buf) 1924 struct device_attribute *attr, char *buf)
1533{ 1925{
1534 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); 1926 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1535 u32 xyval, zval, tmp; 1927 u32 xyval, zval, tmp;
@@ -1548,6 +1940,336 @@ static ssize_t toshiba_position_show(struct device *dev,
1548 1940
1549 return sprintf(buf, "%d %d %d\n", x, y, z); 1941 return sprintf(buf, "%d %d %d\n", x, y, z);
1550} 1942}
1943static DEVICE_ATTR_RO(position);
1944
1945static ssize_t usb_sleep_charge_show(struct device *dev,
1946 struct device_attribute *attr, char *buf)
1947{
1948 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1949 u32 mode;
1950 int ret;
1951
1952 ret = toshiba_usb_sleep_charge_get(toshiba, &mode);
1953 if (ret < 0)
1954 return ret;
1955
1956 return sprintf(buf, "%x\n", mode & SCI_USB_CHARGE_MODE_MASK);
1957}
1958
1959static ssize_t usb_sleep_charge_store(struct device *dev,
1960 struct device_attribute *attr,
1961 const char *buf, size_t count)
1962{
1963 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1964 u32 mode;
1965 int state;
1966 int ret;
1967
1968 ret = kstrtoint(buf, 0, &state);
1969 if (ret)
1970 return ret;
1971 /*
1972 * Check for supported values, where:
1973 * 0 - Disabled
1974 * 1 - Alternate (Non USB conformant devices that require more power)
1975 * 2 - Auto (USB conformant devices)
1976 */
1977 if (state != 0 && state != 1 && state != 2)
1978 return -EINVAL;
1979
1980 /* Set the USB charging mode to internal value */
1981 if (state == 0)
1982 mode = SCI_USB_CHARGE_DISABLED;
1983 else if (state == 1)
1984 mode = SCI_USB_CHARGE_ALTERNATE;
1985 else if (state == 2)
1986 mode = SCI_USB_CHARGE_AUTO;
1987
1988 ret = toshiba_usb_sleep_charge_set(toshiba, mode);
1989 if (ret)
1990 return ret;
1991
1992 return count;
1993}
1994static DEVICE_ATTR_RW(usb_sleep_charge);
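Since the store handler maps the user-visible 0/1/2 onto non-obvious SCI constants, the same mapping can be stated as a table (a sketch reusing the constants defined earlier in this patch; not a drop-in replacement):

static const u32 usb_charge_modes[] = {
	[0] = SCI_USB_CHARGE_DISABLED,	/* 0x30000 */
	[1] = SCI_USB_CHARGE_ALTERNATE,	/* 0x30009 */
	[2] = SCI_USB_CHARGE_AUTO,	/* 0x30021 */
};

/* after the 0..2 bounds check: */
mode = usb_charge_modes[state];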
1995
1996static ssize_t sleep_functions_on_battery_show(struct device *dev,
1997 struct device_attribute *attr,
1998 char *buf)
1999{
2000 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2001 u32 state;
2002 int bat_lvl;
2003 int status;
2004 int ret;
2005 int tmp;
2006
2007 ret = toshiba_sleep_functions_status_get(toshiba, &state);
2008 if (ret < 0)
2009 return ret;
2010
2011 /* Determine the status: 0x4 - Enabled | 0x1 - Disabled */
2012 tmp = state & SCI_USB_CHARGE_BAT_MASK;
2013 status = (tmp == 0x4) ? 1 : 0;
2014 /* Determine the battery level set */
2015 bat_lvl = state >> HCI_MISC_SHIFT;
2016
2017 return sprintf(buf, "%d %d\n", status, bat_lvl);
2018}
2019
2020static ssize_t sleep_functions_on_battery_store(struct device *dev,
2021 struct device_attribute *attr,
2022 const char *buf, size_t count)
2023{
2024 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2025 u32 status;
2026 int value;
2027 int ret;
2028 int tmp;
2029
2030 ret = kstrtoint(buf, 0, &value);
2031 if (ret)
2032 return ret;
2033
2034 /*
2035 * Set the status of the function:
2036 * 0 - Disabled
2037 * 1-100 - Enabled
2038 */
2039 if (value < 0 || value > 100)
2040 return -EINVAL;
2041
2042 if (value == 0) {
2043 tmp = toshiba->usbsc_bat_level << HCI_MISC_SHIFT;
2044 status = tmp | SCI_USB_CHARGE_BAT_LVL_OFF;
2045 } else {
2046 tmp = value << HCI_MISC_SHIFT;
2047 status = tmp | SCI_USB_CHARGE_BAT_LVL_ON;
2048 }
2049 ret = toshiba_sleep_functions_status_set(toshiba, status);
2050 if (ret < 0)
2051 return ret;
2052
2053 toshiba->usbsc_bat_level = status >> HCI_MISC_SHIFT;
2054
2055 return count;
2056}
2057static DEVICE_ATTR_RW(sleep_functions_on_battery);
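A worked example of the value layout these two handlers share: the enable flag lives in the low bits (0x4 on, 0x1 off) and the battery percentage sits above HCI_MISC_SHIFT; the shift value 16 used here is an assumption for illustration:

#include <stdio.h>

#define HCI_MISC_SHIFT		16	/* assumed value */
#define SCI_USB_CHARGE_BAT_MASK	0x7

int main(void)
{
	unsigned int state = (80u << HCI_MISC_SHIFT) | 0x4; /* on, 80% */
	int enabled = (state & SCI_USB_CHARGE_BAT_MASK) == 0x4;
	int level = state >> HCI_MISC_SHIFT;

	printf("%d %d\n", enabled, level);	/* prints "1 80" */
	return 0;
}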
2058
2059static ssize_t usb_rapid_charge_show(struct device *dev,
2060 struct device_attribute *attr, char *buf)
2061{
2062 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2063 u32 state;
2064 int ret;
2065
2066 ret = toshiba_usb_rapid_charge_get(toshiba, &state);
2067 if (ret < 0)
2068 return ret;
2069
2070 return sprintf(buf, "%d\n", state);
2071}
2072
2073static ssize_t usb_rapid_charge_store(struct device *dev,
2074 struct device_attribute *attr,
2075 const char *buf, size_t count)
2076{
2077 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2078 int state;
2079 int ret;
2080
2081 ret = kstrtoint(buf, 0, &state);
2082 if (ret)
2083 return ret;
2084 if (state != 0 && state != 1)
2085 return -EINVAL;
2086
2087 ret = toshiba_usb_rapid_charge_set(toshiba, state);
2088 if (ret)
2089 return ret;
2090
2091 return count;
2092}
2093static DEVICE_ATTR_RW(usb_rapid_charge);
2094
2095static ssize_t usb_sleep_music_show(struct device *dev,
2096 struct device_attribute *attr, char *buf)
2097{
2098 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2099 u32 state;
2100 int ret;
2101
2102 ret = toshiba_usb_sleep_music_get(toshiba, &state);
2103 if (ret < 0)
2104 return ret;
2105
2106 return sprintf(buf, "%d\n", state);
2107}
2108
2109static ssize_t usb_sleep_music_store(struct device *dev,
2110 struct device_attribute *attr,
2111 const char *buf, size_t count)
2112{
2113 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2114 int state;
2115 int ret;
2116
2117 ret = kstrtoint(buf, 0, &state);
2118 if (ret)
2119 return ret;
2120 if (state != 0 && state != 1)
2121 return -EINVAL;
2122
2123 ret = toshiba_usb_sleep_music_set(toshiba, state);
2124 if (ret)
2125 return ret;
2126
2127 return count;
2128}
2129static DEVICE_ATTR_RW(usb_sleep_music);
2130
2131static ssize_t kbd_function_keys_show(struct device *dev,
2132 struct device_attribute *attr, char *buf)
2133{
2134 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2135 int mode;
2136 int ret;
2137
2138 ret = toshiba_function_keys_get(toshiba, &mode);
2139 if (ret < 0)
2140 return ret;
2141
2142 return sprintf(buf, "%d\n", mode);
2143}
2144
2145static ssize_t kbd_function_keys_store(struct device *dev,
2146 struct device_attribute *attr,
2147 const char *buf, size_t count)
2148{
2149 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2150 int mode;
2151 int ret;
2152
2153 ret = kstrtoint(buf, 0, &mode);
2154 if (ret)
2155 return ret;
2156 /*
2157 * Check for the function keys mode where:
2158 * 0 - Normal operation (F{1-12} as usual and hotkeys via FN-F{1-12})
2159 * 1 - Special functions (Opposite of the above setting)
2160 */
2161 if (mode != 0 && mode != 1)
2162 return -EINVAL;
2163
2164 ret = toshiba_function_keys_set(toshiba, mode);
2165 if (ret)
2166 return ret;
2167
 2168 pr_info("Reboot for changes to KBD Function Keys to take effect\n");
2169
2170 return count;
2171}
2172static DEVICE_ATTR_RW(kbd_function_keys);
2173
2174static ssize_t panel_power_on_show(struct device *dev,
2175 struct device_attribute *attr, char *buf)
2176{
2177 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2178 u32 state;
2179 int ret;
2180
2181 ret = toshiba_panel_power_on_get(toshiba, &state);
2182 if (ret < 0)
2183 return ret;
2184
2185 return sprintf(buf, "%d\n", state);
2186}
2187
2188static ssize_t panel_power_on_store(struct device *dev,
2189 struct device_attribute *attr,
2190 const char *buf, size_t count)
2191{
2192 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2193 int state;
2194 int ret;
2195
2196 ret = kstrtoint(buf, 0, &state);
2197 if (ret)
2198 return ret;
2199 if (state != 0 && state != 1)
2200 return -EINVAL;
2201
2202 ret = toshiba_panel_power_on_set(toshiba, state);
2203 if (ret)
2204 return ret;
2205
 2206 pr_info("Reboot for changes to Panel Power ON to take effect\n");
2207
2208 return count;
2209}
2210static DEVICE_ATTR_RW(panel_power_on);
2211
2212static ssize_t usb_three_show(struct device *dev,
2213 struct device_attribute *attr, char *buf)
2214{
2215 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2216 u32 state;
2217 int ret;
2218
2219 ret = toshiba_usb_three_get(toshiba, &state);
2220 if (ret < 0)
2221 return ret;
2222
2223 return sprintf(buf, "%d\n", state);
2224}
2225
2226static ssize_t usb_three_store(struct device *dev,
2227 struct device_attribute *attr,
2228 const char *buf, size_t count)
2229{
2230 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2231 int state;
2232 int ret;
2233
2234 ret = kstrtoint(buf, 0, &state);
2235 if (ret)
2236 return ret;
2237 /*
2238 * Check for USB 3 mode where:
2239 * 0 - Disabled (Acts like a USB 2 port, saving power)
2240 * 1 - Enabled
2241 */
2242 if (state != 0 && state != 1)
2243 return -EINVAL;
2244
2245 ret = toshiba_usb_three_set(toshiba, state);
2246 if (ret)
2247 return ret;
2248
 2249 pr_info("Reboot for changes to USB 3 to take effect\n");
2250
2251 return count;
2252}
2253static DEVICE_ATTR_RW(usb_three);
2254
2255static struct attribute *toshiba_attributes[] = {
2256 &dev_attr_version.attr,
2257 &dev_attr_fan.attr,
2258 &dev_attr_kbd_backlight_mode.attr,
2259 &dev_attr_kbd_type.attr,
2260 &dev_attr_available_kbd_modes.attr,
2261 &dev_attr_kbd_backlight_timeout.attr,
2262 &dev_attr_touchpad.attr,
2263 &dev_attr_position.attr,
2264 &dev_attr_usb_sleep_charge.attr,
2265 &dev_attr_sleep_functions_on_battery.attr,
2266 &dev_attr_usb_rapid_charge.attr,
2267 &dev_attr_usb_sleep_music.attr,
2268 &dev_attr_kbd_function_keys.attr,
2269 &dev_attr_panel_power_on.attr,
2270 &dev_attr_usb_three.attr,
2271 NULL,
2272};
1551 2273
1552static umode_t toshiba_sysfs_is_visible(struct kobject *kobj, 2274static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
1553 struct attribute *attr, int idx) 2275 struct attribute *attr, int idx)
@@ -1556,7 +2278,9 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
1556 struct toshiba_acpi_dev *drv = dev_get_drvdata(dev); 2278 struct toshiba_acpi_dev *drv = dev_get_drvdata(dev);
1557 bool exists = true; 2279 bool exists = true;
1558 2280
1559 if (attr == &dev_attr_kbd_backlight_mode.attr) 2281 if (attr == &dev_attr_fan.attr)
2282 exists = (drv->fan_supported) ? true : false;
2283 else if (attr == &dev_attr_kbd_backlight_mode.attr)
1560 exists = (drv->kbd_illum_supported) ? true : false; 2284 exists = (drv->kbd_illum_supported) ? true : false;
1561 else if (attr == &dev_attr_kbd_backlight_timeout.attr) 2285 else if (attr == &dev_attr_kbd_backlight_timeout.attr)
1562 exists = (drv->kbd_mode == SCI_KBD_MODE_AUTO) ? true : false; 2286 exists = (drv->kbd_mode == SCI_KBD_MODE_AUTO) ? true : false;
@@ -1564,10 +2288,29 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
1564 exists = (drv->touchpad_supported) ? true : false; 2288 exists = (drv->touchpad_supported) ? true : false;
1565 else if (attr == &dev_attr_position.attr) 2289 else if (attr == &dev_attr_position.attr)
1566 exists = (drv->accelerometer_supported) ? true : false; 2290 exists = (drv->accelerometer_supported) ? true : false;
2291 else if (attr == &dev_attr_usb_sleep_charge.attr)
2292 exists = (drv->usb_sleep_charge_supported) ? true : false;
2293 else if (attr == &dev_attr_sleep_functions_on_battery.attr)
2294 exists = (drv->usb_sleep_charge_supported) ? true : false;
2295 else if (attr == &dev_attr_usb_rapid_charge.attr)
2296 exists = (drv->usb_rapid_charge_supported) ? true : false;
2297 else if (attr == &dev_attr_usb_sleep_music.attr)
2298 exists = (drv->usb_sleep_music_supported) ? true : false;
2299 else if (attr == &dev_attr_kbd_function_keys.attr)
2300 exists = (drv->kbd_function_keys_supported) ? true : false;
2301 else if (attr == &dev_attr_panel_power_on.attr)
2302 exists = (drv->panel_power_on_supported) ? true : false;
2303 else if (attr == &dev_attr_usb_three.attr)
2304 exists = (drv->usb_three_supported) ? true : false;
1567 2305
1568 return exists ? attr->mode : 0; 2306 return exists ? attr->mode : 0;
1569} 2307}
1570 2308
2309static struct attribute_group toshiba_attr_group = {
2310 .is_visible = toshiba_sysfs_is_visible,
2311 .attrs = toshiba_attributes,
2312};
2313
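The group's is_visible hook runs once per attribute when the group is registered: returning 0 suppresses the file, returning attr->mode exposes it unchanged, which is how the conditionally-supported features above appear or vanish. A kernel-style sketch; my_driver, dev_attr_foo and foo_supported are hypothetical names:

static umode_t example_is_visible(struct kobject *kobj,
				  struct attribute *attr, int idx)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct my_driver *drv = dev_get_drvdata(dev);

	if (attr == &dev_attr_foo.attr && !drv->foo_supported)
		return 0;		/* sysfs file is not created */
	return attr->mode;		/* keep the declared permissions */
}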
1571/* 2314/*
1572 * Hotkeys 2315 * Hotkeys
1573 */ 2316 */
@@ -1644,7 +2387,7 @@ static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
1644 if (scancode == 0x100) 2387 if (scancode == 0x100)
1645 return; 2388 return;
1646 2389
1647 /* act on key press; ignore key release */ 2390 /* Act on key press; ignore key release */
1648 if (scancode & 0x80) 2391 if (scancode & 0x80)
1649 return; 2392 return;
1650 2393
@@ -1680,7 +2423,7 @@ static void toshiba_acpi_process_hotkeys(struct toshiba_acpi_dev *dev)
1680 hci_result = 2423 hci_result =
1681 hci_write1(dev, HCI_SYSTEM_EVENT, 1); 2424 hci_write1(dev, HCI_SYSTEM_EVENT, 1);
1682 pr_notice("Re-enabled hotkeys\n"); 2425 pr_notice("Re-enabled hotkeys\n");
1683 /* fall through */ 2426 /* Fall through */
1684 default: 2427 default:
1685 retries--; 2428 retries--;
1686 break; 2429 break;
@@ -1802,7 +2545,7 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
1802 props.type = BACKLIGHT_PLATFORM; 2545 props.type = BACKLIGHT_PLATFORM;
1803 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1; 2546 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
1804 2547
1805 /* adding an extra level and having 0 change to transflective mode */ 2548 /* Adding an extra level and having 0 change to transflective mode */
1806 if (dev->tr_backlight_supported) 2549 if (dev->tr_backlight_supported)
1807 props.max_brightness++; 2550 props.max_brightness++;
1808 2551
@@ -1973,6 +2716,24 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
1973 ret = toshiba_accelerometer_supported(dev); 2716 ret = toshiba_accelerometer_supported(dev);
1974 dev->accelerometer_supported = !ret; 2717 dev->accelerometer_supported = !ret;
1975 2718
2719 ret = toshiba_usb_sleep_charge_get(dev, &dummy);
2720 dev->usb_sleep_charge_supported = !ret;
2721
2722 ret = toshiba_usb_rapid_charge_get(dev, &dummy);
2723 dev->usb_rapid_charge_supported = !ret;
2724
2725 ret = toshiba_usb_sleep_music_get(dev, &dummy);
2726 dev->usb_sleep_music_supported = !ret;
2727
2728 ret = toshiba_function_keys_get(dev, &dummy);
2729 dev->kbd_function_keys_supported = !ret;
2730
2731 ret = toshiba_panel_power_on_get(dev, &dummy);
2732 dev->panel_power_on_supported = !ret;
2733
2734 ret = toshiba_usb_three_get(dev, &dummy);
2735 dev->usb_three_supported = !ret;
2736
1976 /* Determine whether or not BIOS supports fan and video interfaces */ 2737 /* Determine whether or not BIOS supports fan and video interfaces */
1977 2738
1978 ret = get_video_status(dev, &dummy); 2739 ret = get_video_status(dev, &dummy);
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index 782e82289571..f980ff7166e9 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -179,8 +179,9 @@ int pnp_check_port(struct pnp_dev *dev, struct resource *res)
179 /* check if the resource is already in use, skip if the 179 /* check if the resource is already in use, skip if the
180 * device is active because it itself may be in use */ 180 * device is active because it itself may be in use */
181 if (!dev->active) { 181 if (!dev->active) {
182 if (__check_region(&ioport_resource, *port, length(port, end))) 182 if (!request_region(*port, length(port, end), "pnp"))
183 return 0; 183 return 0;
184 release_region(*port, length(port, end));
184 } 185 }
185 186
186 /* check if the resource is reserved */ 187 /* check if the resource is reserved */
@@ -241,8 +242,9 @@ int pnp_check_mem(struct pnp_dev *dev, struct resource *res)
241 /* check if the resource is already in use, skip if the 242 /* check if the resource is already in use, skip if the
242 * device is active because it itself may be in use */ 243 * device is active because it itself may be in use */
243 if (!dev->active) { 244 if (!dev->active) {
244 if (check_mem_region(*addr, length(addr, end))) 245 if (!request_mem_region(*addr, length(addr, end), "pnp"))
245 return 0; 246 return 0;
247 release_mem_region(*addr, length(addr, end));
246 } 248 }
247 249
248 /* check if the resource is reserved */ 250 /* check if the resource is reserved */
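The point of both hunks: check_region-style probing is racy, since another driver can claim the range between the check and the later request. Actually requesting and immediately releasing the region closes that window. A kernel-style sketch of the pattern (the helper name is illustrative; see linux/ioport.h):

static int pnp_port_is_free(resource_size_t start, resource_size_t n)
{
	if (!request_region(start, n, "pnp"))
		return 0;		/* already owned by someone */
	release_region(start, n);	/* we only wanted to probe */
	return 1;
}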
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index a3ecf5809634..b1541f40fd8d 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -53,6 +53,7 @@ config PWM_ATMEL
53config PWM_ATMEL_HLCDC_PWM 53config PWM_ATMEL_HLCDC_PWM
54 tristate "Atmel HLCDC PWM support" 54 tristate "Atmel HLCDC PWM support"
55 depends on MFD_ATMEL_HLCDC 55 depends on MFD_ATMEL_HLCDC
56 depends on HAVE_CLK
56 help 57 help
57 Generic PWM framework driver for the PWM output of the HLCDC 58 Generic PWM framework driver for the PWM output of the HLCDC
58 (Atmel High-end LCD Controller). This PWM output is mainly used 59 (Atmel High-end LCD Controller). This PWM output is mainly used
@@ -130,6 +131,19 @@ config PWM_FSL_FTM
130 To compile this driver as a module, choose M here: the module 131 To compile this driver as a module, choose M here: the module
131 will be called pwm-fsl-ftm. 132 will be called pwm-fsl-ftm.
132 133
134config PWM_IMG
135 tristate "Imagination Technologies PWM driver"
136 depends on HAS_IOMEM
137 depends on MFD_SYSCON
138 depends on COMMON_CLK
139 depends on MIPS || COMPILE_TEST
140 help
141 Generic PWM framework driver for Imagination Technologies
142 PWM block which supports 4 channels.
143
144 To compile this driver as a module, choose M here: the module
 145 will be called pwm-img.
146
133config PWM_IMX 147config PWM_IMX
134 tristate "i.MX PWM support" 148 tristate "i.MX PWM support"
135 depends on ARCH_MXC 149 depends on ARCH_MXC
@@ -283,6 +297,16 @@ config PWM_STI
283 To compile this driver as a module, choose M here: the module 297 To compile this driver as a module, choose M here: the module
284 will be called pwm-sti. 298 will be called pwm-sti.
285 299
300config PWM_SUN4I
301 tristate "Allwinner PWM support"
302 depends on ARCH_SUNXI || COMPILE_TEST
303 depends on HAS_IOMEM && COMMON_CLK
304 help
305 Generic PWM framework driver for Allwinner SoCs.
306
307 To compile this driver as a module, choose M here: the module
308 will be called pwm-sun4i.
309
286config PWM_TEGRA 310config PWM_TEGRA
287 tristate "NVIDIA Tegra PWM support" 311 tristate "NVIDIA Tegra PWM support"
288 depends on ARCH_TEGRA 312 depends on ARCH_TEGRA
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 65259ac1e8de..ec50eb5b5a8f 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o
 obj-$(CONFIG_PWM_CLPS711X)	+= pwm-clps711x.o
 obj-$(CONFIG_PWM_EP93XX)	+= pwm-ep93xx.o
 obj-$(CONFIG_PWM_FSL_FTM)	+= pwm-fsl-ftm.o
+obj-$(CONFIG_PWM_IMG)		+= pwm-img.o
 obj-$(CONFIG_PWM_IMX)		+= pwm-imx.o
 obj-$(CONFIG_PWM_JZ4740)	+= pwm-jz4740.o
 obj-$(CONFIG_PWM_LP3943)	+= pwm-lp3943.o
@@ -26,6 +27,7 @@ obj-$(CONFIG_PWM_ROCKCHIP) += pwm-rockchip.o
 obj-$(CONFIG_PWM_SAMSUNG)	+= pwm-samsung.o
 obj-$(CONFIG_PWM_SPEAR)	+= pwm-spear.o
 obj-$(CONFIG_PWM_STI)		+= pwm-sti.o
+obj-$(CONFIG_PWM_SUN4I)	+= pwm-sun4i.o
 obj-$(CONFIG_PWM_TEGRA)	+= pwm-tegra.o
 obj-$(CONFIG_PWM_TIECAP)	+= pwm-tiecap.o
 obj-$(CONFIG_PWM_TIEHRPWM)	+= pwm-tiehrpwm.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 966497d10c6e..810aef3f4c3e 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -192,7 +192,7 @@ static void of_pwmchip_add(struct pwm_chip *chip)
 
 static void of_pwmchip_remove(struct pwm_chip *chip)
 {
-	if (chip->dev && chip->dev->of_node)
+	if (chip->dev)
 		of_node_put(chip->dev->of_node);
 }
 
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index e7a785fadcdf..522f7075bb1a 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -64,6 +64,9 @@ static int atmel_hlcdc_pwm_config(struct pwm_chip *c,
 
 	if (!chip->errata || !chip->errata->slow_clk_erratum) {
 		clk_freq = clk_get_rate(new_clk);
+		if (!clk_freq)
+			return -EINVAL;
+
 		clk_period_ns = (u64)NSEC_PER_SEC * 256;
 		do_div(clk_period_ns, clk_freq);
 	}
@@ -73,6 +76,9 @@ static int atmel_hlcdc_pwm_config(struct pwm_chip *c,
 	    clk_period_ns > period_ns) {
 		new_clk = hlcdc->sys_clk;
 		clk_freq = clk_get_rate(new_clk);
+		if (!clk_freq)
+			return -EINVAL;
+
 		clk_period_ns = (u64)NSEC_PER_SEC * 256;
 		do_div(clk_period_ns, clk_freq);
 	}
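
Both hunks guard the 256-cycle period computation against clk_get_rate()
returning zero, which would otherwise make do_div() divide by zero. A
standalone sketch of the arithmetic (plain C, hypothetical rates):

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000ULL

	int main(void)
	{
		uint64_t rates[] = { 0, 32768, 12000000 };
		int i;

		for (i = 0; i < 3; i++) {
			if (!rates[i]) {
				puts("rate 0 -> bail out with -EINVAL");
				continue;	/* the new guard above */
			}
			/* period of the 256-cycle counter, in ns */
			printf("rate %llu -> %llu ns\n",
			       (unsigned long long)rates[i],
			       (unsigned long long)(NSEC_PER_SEC * 256 / rates[i]));
		}
		return 0;
	}
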
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
new file mode 100644
index 000000000000..476171a768d6
--- /dev/null
+++ b/drivers/pwm/pwm-img.c
@@ -0,0 +1,249 @@
+/*
+ * Imagination Technologies Pulse Width Modulator driver
+ *
+ * Copyright (c) 2014-2015, Imagination Technologies
+ *
+ * Based on drivers/pwm/pwm-tegra.c, Copyright (c) 2010, NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* PWM registers */
+#define PWM_CTRL_CFG				0x0000
+#define PWM_CTRL_CFG_NO_SUB_DIV			0
+#define PWM_CTRL_CFG_SUB_DIV0			1
+#define PWM_CTRL_CFG_SUB_DIV1			2
+#define PWM_CTRL_CFG_SUB_DIV0_DIV1		3
+#define PWM_CTRL_CFG_DIV_SHIFT(ch)		((ch) * 2 + 4)
+#define PWM_CTRL_CFG_DIV_MASK			0x3
+
+#define PWM_CH_CFG(ch)				(0x4 + (ch) * 4)
+#define PWM_CH_CFG_TMBASE_SHIFT			0
+#define PWM_CH_CFG_DUTY_SHIFT			16
+
+#define PERIP_PWM_PDM_CONTROL			0x0140
+#define PERIP_PWM_PDM_CONTROL_CH_MASK		0x1
+#define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch)	((ch) * 4)
+
+#define MAX_TMBASE_STEPS			65536
+
+struct img_pwm_chip {
+	struct device	*dev;
+	struct pwm_chip	chip;
+	struct clk	*pwm_clk;
+	struct clk	*sys_clk;
+	void __iomem	*base;
+	struct regmap	*periph_regs;
+};
+
+static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip)
+{
+	return container_of(chip, struct img_pwm_chip, chip);
+}
+
+static inline void img_pwm_writel(struct img_pwm_chip *chip,
+				  u32 reg, u32 val)
+{
+	writel(val, chip->base + reg);
+}
+
+static inline u32 img_pwm_readl(struct img_pwm_chip *chip,
+				u32 reg)
+{
+	return readl(chip->base + reg);
+}
+
+static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+			  int duty_ns, int period_ns)
+{
+	u32 val, div, duty, timebase;
+	unsigned long mul, output_clk_hz, input_clk_hz;
+	struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
+
+	input_clk_hz = clk_get_rate(pwm_chip->pwm_clk);
+	output_clk_hz = DIV_ROUND_UP(NSEC_PER_SEC, period_ns);
+
+	mul = DIV_ROUND_UP(input_clk_hz, output_clk_hz);
+	if (mul <= MAX_TMBASE_STEPS) {
+		div = PWM_CTRL_CFG_NO_SUB_DIV;
+		timebase = DIV_ROUND_UP(mul, 1);
+	} else if (mul <= MAX_TMBASE_STEPS * 8) {
+		div = PWM_CTRL_CFG_SUB_DIV0;
+		timebase = DIV_ROUND_UP(mul, 8);
+	} else if (mul <= MAX_TMBASE_STEPS * 64) {
+		div = PWM_CTRL_CFG_SUB_DIV1;
+		timebase = DIV_ROUND_UP(mul, 64);
+	} else if (mul <= MAX_TMBASE_STEPS * 512) {
+		div = PWM_CTRL_CFG_SUB_DIV0_DIV1;
+		timebase = DIV_ROUND_UP(mul, 512);
+	} else if (mul > MAX_TMBASE_STEPS * 512) {
+		dev_err(chip->dev,
+			"failed to configure timebase steps/divider value\n");
+		return -EINVAL;
+	}
+
+	duty = DIV_ROUND_UP(timebase * duty_ns, period_ns);
+
+	val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
+	val &= ~(PWM_CTRL_CFG_DIV_MASK << PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm));
+	val |= (div & PWM_CTRL_CFG_DIV_MASK) <<
+		PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm);
+	img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
+
+	val = (duty << PWM_CH_CFG_DUTY_SHIFT) |
+	      (timebase << PWM_CH_CFG_TMBASE_SHIFT);
+	img_pwm_writel(pwm_chip, PWM_CH_CFG(pwm->hwpwm), val);
+
+	return 0;
+}
+
+static int img_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	u32 val;
+	struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
+
+	val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
+	val |= BIT(pwm->hwpwm);
+	img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
+
+	regmap_update_bits(pwm_chip->periph_regs, PERIP_PWM_PDM_CONTROL,
+			   PERIP_PWM_PDM_CONTROL_CH_MASK <<
+			   PERIP_PWM_PDM_CONTROL_CH_SHIFT(pwm->hwpwm), 0);
+
+	return 0;
+}
+
+static void img_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	u32 val;
+	struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
+
+	val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
+	val &= ~BIT(pwm->hwpwm);
+	img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
+}
+
+static const struct pwm_ops img_pwm_ops = {
+	.config = img_pwm_config,
+	.enable = img_pwm_enable,
+	.disable = img_pwm_disable,
+	.owner = THIS_MODULE,
+};
+
+static int img_pwm_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct resource *res;
+	struct img_pwm_chip *pwm;
+
+	pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
+	if (!pwm)
+		return -ENOMEM;
+
+	pwm->dev = &pdev->dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pwm->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pwm->base))
+		return PTR_ERR(pwm->base);
+
+	pwm->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+							   "img,cr-periph");
+	if (IS_ERR(pwm->periph_regs))
+		return PTR_ERR(pwm->periph_regs);
+
+	pwm->sys_clk = devm_clk_get(&pdev->dev, "sys");
+	if (IS_ERR(pwm->sys_clk)) {
+		dev_err(&pdev->dev, "failed to get system clock\n");
+		return PTR_ERR(pwm->sys_clk);
+	}
+
+	pwm->pwm_clk = devm_clk_get(&pdev->dev, "pwm");
+	if (IS_ERR(pwm->pwm_clk)) {
+		dev_err(&pdev->dev, "failed to get pwm clock\n");
+		return PTR_ERR(pwm->pwm_clk);
+	}
+
+	ret = clk_prepare_enable(pwm->sys_clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "could not prepare or enable sys clock\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(pwm->pwm_clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "could not prepare or enable pwm clock\n");
+		goto disable_sysclk;
+	}
+
+	pwm->chip.dev = &pdev->dev;
+	pwm->chip.ops = &img_pwm_ops;
+	pwm->chip.base = -1;
+	pwm->chip.npwm = 4;
+
+	ret = pwmchip_add(&pwm->chip);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret);
+		goto disable_pwmclk;
+	}
+
+	platform_set_drvdata(pdev, pwm);
+	return 0;
+
+disable_pwmclk:
+	clk_disable_unprepare(pwm->pwm_clk);
+disable_sysclk:
+	clk_disable_unprepare(pwm->sys_clk);
+	return ret;
+}
+
+static int img_pwm_remove(struct platform_device *pdev)
+{
+	struct img_pwm_chip *pwm_chip = platform_get_drvdata(pdev);
+	u32 val;
+	unsigned int i;
+
+	for (i = 0; i < pwm_chip->chip.npwm; i++) {
+		val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
+		val &= ~BIT(i);
+		img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
+	}
+
+	clk_disable_unprepare(pwm_chip->pwm_clk);
+	clk_disable_unprepare(pwm_chip->sys_clk);
+
+	return pwmchip_remove(&pwm_chip->chip);
+}
+
+static const struct of_device_id img_pwm_of_match[] = {
+	{ .compatible = "img,pistachio-pwm", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, img_pwm_of_match);
+
+static struct platform_driver img_pwm_driver = {
+	.driver = {
+		.name = "img-pwm",
+		.of_match_table = img_pwm_of_match,
+	},
+	.probe = img_pwm_probe,
+	.remove = img_pwm_remove,
+};
+module_platform_driver(img_pwm_driver);
+
+MODULE_AUTHOR("Sai Masarapu <Sai.Masarapu@imgtec.com>");
+MODULE_DESCRIPTION("Imagination Technologies PWM DAC driver");
+MODULE_LICENSE("GPL v2");
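
The img_pwm_config() ladder picks the smallest sub-divider that brings the
input-clock-to-output-frequency ratio under the 16-bit timebase limit. A
desk-checkable model of that selection (plain C, hypothetical clock and
period; not the driver itself):

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_TMBASE_STEPS 65536
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		uint64_t input_clk_hz = 40000000;	/* assumed 40 MHz "pwm" clock */
		uint64_t period_ns = 1000000;		/* requested 1 ms period */
		uint64_t output_clk_hz = DIV_ROUND_UP(1000000000ULL, period_ns);
		uint64_t mul = DIV_ROUND_UP(input_clk_hz, output_clk_hz);
		const unsigned int subdiv[] = { 1, 8, 64, 512 };
		int i;

		for (i = 0; i < 4; i++) {
			if (mul <= (uint64_t)MAX_TMBASE_STEPS * subdiv[i]) {
				/* div code i matches PWM_CTRL_CFG_*DIV* above */
				printf("div=%d timebase=%llu\n", i,
				       (unsigned long long)DIV_ROUND_UP(mul, subdiv[i]));
				return 0;
			}
		}
		puts("out of range -> -EINVAL");
		return 0;
	}
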
diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
index b95115cdaea7..92abbd56b9f7 100644
--- a/drivers/pwm/pwm-sti.c
+++ b/drivers/pwm/pwm-sti.c
@@ -57,6 +57,7 @@ struct sti_pwm_chip {
 	struct regmap_field *pwm_int_en;
 	struct pwm_chip chip;
 	struct pwm_device *cur;
+	unsigned long configured;
 	unsigned int en_count;
 	struct mutex sti_pwm_lock; /* To sync between enable/disable calls */
 	void __iomem *mmio;
@@ -102,24 +103,6 @@ static int sti_pwm_get_prescale(struct sti_pwm_chip *pc, unsigned long period,
 	return 0;
 }
 
-/* Calculate the number of PWM devices configured with a period. */
-static unsigned int sti_pwm_count_configured(struct pwm_chip *chip)
-{
-	struct pwm_device *pwm;
-	unsigned int ncfg = 0;
-	unsigned int i;
-
-	for (i = 0; i < chip->npwm; i++) {
-		pwm = &chip->pwms[i];
-		if (test_bit(PWMF_REQUESTED, &pwm->flags)) {
-			if (pwm_get_period(pwm))
-				ncfg++;
-		}
-	}
-
-	return ncfg;
-}
-
 /*
  * For STiH4xx PWM IP, the PWM period is fixed to 256 local clock cycles.
  * The only way to change the period (apart from changing the PWM input clock)
@@ -141,7 +124,7 @@ static int sti_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 	unsigned int ncfg;
 	bool period_same = false;
 
-	ncfg = sti_pwm_count_configured(chip);
+	ncfg = hweight_long(pc->configured);
 	if (ncfg)
 		period_same = (period_ns == pwm_get_period(cur));
 
@@ -197,6 +180,7 @@ static int sti_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 
 	ret = regmap_field_write(pc->pwm_int_en, 0);
 
+	set_bit(pwm->hwpwm, &pc->configured);
 	pc->cur = pwm;
 
 	dev_dbg(dev, "prescale:%u, period:%i, duty:%i, pwmvalx:%u\n",
@@ -254,10 +238,18 @@ static void sti_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
 	mutex_unlock(&pc->sti_pwm_lock);
 }
 
+static void sti_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
+
+	clear_bit(pwm->hwpwm, &pc->configured);
+}
+
 static const struct pwm_ops sti_pwm_ops = {
 	.config = sti_pwm_config,
 	.enable = sti_pwm_enable,
 	.disable = sti_pwm_disable,
+	.free = sti_pwm_free,
 	.owner = THIS_MODULE,
 };
 
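
The sti change replaces a per-device counting loop with a bitmask that is
updated in .config/.free and collapsed with hweight_long(). A userspace
analogue of that bookkeeping, with __builtin_popcountl() standing in for
hweight_long():

	#include <stdio.h>

	int main(void)
	{
		unsigned long configured = 0;

		configured |= 1UL << 0;	/* config on hwpwm 0: set_bit() */
		configured |= 1UL << 3;	/* config on hwpwm 3 */
		printf("ncfg=%d\n", __builtin_popcountl(configured));	/* 2 */

		configured &= ~(1UL << 3);	/* free of hwpwm 3: clear_bit() */
		printf("ncfg=%d\n", __builtin_popcountl(configured));	/* 1 */
		return 0;
	}
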
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
new file mode 100644
index 000000000000..cd9dde563018
--- /dev/null
+++ b/drivers/pwm/pwm-sun4i.c
@@ -0,0 +1,366 @@
+/*
+ * Driver for Allwinner sun4i Pulse Width Modulation Controller
+ *
+ * Copyright (C) 2014 Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ *
+ * Licensed under GPLv2.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/time.h>
+
+#define PWM_CTRL_REG		0x0
+
+#define PWM_CH_PRD_BASE		0x4
+#define PWM_CH_PRD_OFFSET	0x4
+#define PWM_CH_PRD(ch)		(PWM_CH_PRD_BASE + PWM_CH_PRD_OFFSET * (ch))
+
+#define PWMCH_OFFSET		15
+#define PWM_PRESCAL_MASK	GENMASK(3, 0)
+#define PWM_PRESCAL_OFF		0
+#define PWM_EN			BIT(4)
+#define PWM_ACT_STATE		BIT(5)
+#define PWM_CLK_GATING		BIT(6)
+#define PWM_MODE		BIT(7)
+#define PWM_PULSE		BIT(8)
+#define PWM_BYPASS		BIT(9)
+
+#define PWM_RDY_BASE		28
+#define PWM_RDY_OFFSET		1
+#define PWM_RDY(ch)		BIT(PWM_RDY_BASE + PWM_RDY_OFFSET * (ch))
+
+#define PWM_PRD(prd)		(((prd) - 1) << 16)
+#define PWM_PRD_MASK		GENMASK(15, 0)
+
+#define PWM_DTY_MASK		GENMASK(15, 0)
+
+#define BIT_CH(bit, chan)	((bit) << ((chan) * PWMCH_OFFSET))
+
+static const u32 prescaler_table[] = {
+	120,
+	180,
+	240,
+	360,
+	480,
+	0,
+	0,
+	0,
+	12000,
+	24000,
+	36000,
+	48000,
+	72000,
+	0,
+	0,
+	0, /* Actually 1 but tested separately */
+};
+
+struct sun4i_pwm_data {
+	bool has_prescaler_bypass;
+	bool has_rdy;
+};
+
+struct sun4i_pwm_chip {
+	struct pwm_chip chip;
+	struct clk *clk;
+	void __iomem *base;
+	spinlock_t ctrl_lock;
+	const struct sun4i_pwm_data *data;
+};
+
+static inline struct sun4i_pwm_chip *to_sun4i_pwm_chip(struct pwm_chip *chip)
+{
+	return container_of(chip, struct sun4i_pwm_chip, chip);
+}
+
+static inline u32 sun4i_pwm_readl(struct sun4i_pwm_chip *chip,
+				  unsigned long offset)
+{
+	return readl(chip->base + offset);
+}
+
+static inline void sun4i_pwm_writel(struct sun4i_pwm_chip *chip,
+				    u32 val, unsigned long offset)
+{
+	writel(val, chip->base + offset);
+}
+
+static int sun4i_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+			    int duty_ns, int period_ns)
+{
+	struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
+	u32 prd, dty, val, clk_gate;
+	u64 clk_rate, div = 0;
+	unsigned int prescaler = 0;
+	int err;
+
+	clk_rate = clk_get_rate(sun4i_pwm->clk);
+
+	if (sun4i_pwm->data->has_prescaler_bypass) {
+		/* First, test without any prescaler when available */
+		prescaler = PWM_PRESCAL_MASK;
+		/*
+		 * When not using any prescaler, the clock period in nanoseconds
+		 * is not an integer so round it half up instead of
+		 * truncating to get less surprising values.
+		 */
+		div = clk_rate * period_ns + NSEC_PER_SEC/2;
+		do_div(div, NSEC_PER_SEC);
+		if (div - 1 > PWM_PRD_MASK)
+			prescaler = 0;
+	}
+
+	if (prescaler == 0) {
+		/* Go up from the first divider */
+		for (prescaler = 0; prescaler < PWM_PRESCAL_MASK; prescaler++) {
+			if (!prescaler_table[prescaler])
+				continue;
+			div = clk_rate;
+			do_div(div, prescaler_table[prescaler]);
+			div = div * period_ns;
+			do_div(div, NSEC_PER_SEC);
+			if (div - 1 <= PWM_PRD_MASK)
+				break;
+		}
+
+		if (div - 1 > PWM_PRD_MASK) {
+			dev_err(chip->dev, "period exceeds the maximum value\n");
+			return -EINVAL;
+		}
+	}
+
+	prd = div;
+	div *= duty_ns;
+	do_div(div, period_ns);
+	dty = div;
+
+	err = clk_prepare_enable(sun4i_pwm->clk);
+	if (err) {
+		dev_err(chip->dev, "failed to enable PWM clock\n");
+		return err;
+	}
+
+	spin_lock(&sun4i_pwm->ctrl_lock);
+	val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+
+	if (sun4i_pwm->data->has_rdy && (val & PWM_RDY(pwm->hwpwm))) {
+		spin_unlock(&sun4i_pwm->ctrl_lock);
+		clk_disable_unprepare(sun4i_pwm->clk);
+		return -EBUSY;
+	}
+
+	clk_gate = val & BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
+	if (clk_gate) {
+		val &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
+		sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
+	}
+
+	val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+	val &= ~BIT_CH(PWM_PRESCAL_MASK, pwm->hwpwm);
+	val |= BIT_CH(prescaler, pwm->hwpwm);
+	sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
+
+	val = (dty & PWM_DTY_MASK) | PWM_PRD(prd);
+	sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm));
+
+	if (clk_gate) {
+		val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+		val |= clk_gate;
+		sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
+	}
+
+	spin_unlock(&sun4i_pwm->ctrl_lock);
+	clk_disable_unprepare(sun4i_pwm->clk);
+
+	return 0;
+}
+
+static int sun4i_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
+				  enum pwm_polarity polarity)
+{
+	struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
+	u32 val;
+	int ret;
+
+	ret = clk_prepare_enable(sun4i_pwm->clk);
+	if (ret) {
+		dev_err(chip->dev, "failed to enable PWM clock\n");
+		return ret;
+	}
+
+	spin_lock(&sun4i_pwm->ctrl_lock);
+	val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+
+	if (polarity != PWM_POLARITY_NORMAL)
+		val &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
+	else
+		val |= BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
+
+	sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
+
+	spin_unlock(&sun4i_pwm->ctrl_lock);
+	clk_disable_unprepare(sun4i_pwm->clk);
+
+	return 0;
+}
+
+static int sun4i_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
+	u32 val;
+	int ret;
+
+	ret = clk_prepare_enable(sun4i_pwm->clk);
+	if (ret) {
+		dev_err(chip->dev, "failed to enable PWM clock\n");
+		return ret;
+	}
+
+	spin_lock(&sun4i_pwm->ctrl_lock);
+	val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+	val |= BIT_CH(PWM_EN, pwm->hwpwm);
+	val |= BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
+	sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
+	spin_unlock(&sun4i_pwm->ctrl_lock);
+
+	return 0;
+}
+
+static void sun4i_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
+	u32 val;
+
+	spin_lock(&sun4i_pwm->ctrl_lock);
+	val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+	val &= ~BIT_CH(PWM_EN, pwm->hwpwm);
+	val &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
+	sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
+	spin_unlock(&sun4i_pwm->ctrl_lock);
+
+	clk_disable_unprepare(sun4i_pwm->clk);
+}
+
+static const struct pwm_ops sun4i_pwm_ops = {
+	.config = sun4i_pwm_config,
+	.set_polarity = sun4i_pwm_set_polarity,
+	.enable = sun4i_pwm_enable,
+	.disable = sun4i_pwm_disable,
+	.owner = THIS_MODULE,
+};
+
+static const struct sun4i_pwm_data sun4i_pwm_data_a10 = {
+	.has_prescaler_bypass = false,
+	.has_rdy = false,
+};
+
+static const struct sun4i_pwm_data sun4i_pwm_data_a20 = {
+	.has_prescaler_bypass = true,
+	.has_rdy = true,
+};
+
+static const struct of_device_id sun4i_pwm_dt_ids[] = {
+	{
+		.compatible = "allwinner,sun4i-a10-pwm",
+		.data = &sun4i_pwm_data_a10,
+	}, {
+		.compatible = "allwinner,sun7i-a20-pwm",
+		.data = &sun4i_pwm_data_a20,
+	}, {
+		/* sentinel */
+	},
+};
+MODULE_DEVICE_TABLE(of, sun4i_pwm_dt_ids);
+
+static int sun4i_pwm_probe(struct platform_device *pdev)
+{
+	struct sun4i_pwm_chip *pwm;
+	struct resource *res;
+	u32 val;
+	int i, ret;
+	const struct of_device_id *match;
+
+	match = of_match_device(sun4i_pwm_dt_ids, &pdev->dev);
+
+	pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
+	if (!pwm)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pwm->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pwm->base))
+		return PTR_ERR(pwm->base);
+
+	pwm->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(pwm->clk))
+		return PTR_ERR(pwm->clk);
+
+	pwm->chip.dev = &pdev->dev;
+	pwm->chip.ops = &sun4i_pwm_ops;
+	pwm->chip.base = -1;
+	pwm->chip.npwm = 2;
+	pwm->chip.can_sleep = true;
+	pwm->chip.of_xlate = of_pwm_xlate_with_flags;
+	pwm->chip.of_pwm_n_cells = 3;
+	pwm->data = match->data;
+
+	spin_lock_init(&pwm->ctrl_lock);
+
+	ret = pwmchip_add(&pwm->chip);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, pwm);
+
+	ret = clk_prepare_enable(pwm->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to enable PWM clock\n");
+		goto clk_error;
+	}
+
+	val = sun4i_pwm_readl(pwm, PWM_CTRL_REG);
+	for (i = 0; i < pwm->chip.npwm; i++)
+		if (!(val & BIT_CH(PWM_ACT_STATE, i)))
+			pwm->chip.pwms[i].polarity = PWM_POLARITY_INVERSED;
+	clk_disable_unprepare(pwm->clk);
+
+	return 0;
+
+clk_error:
+	pwmchip_remove(&pwm->chip);
+	return ret;
+}
+
+static int sun4i_pwm_remove(struct platform_device *pdev)
+{
+	struct sun4i_pwm_chip *pwm = platform_get_drvdata(pdev);
+
+	return pwmchip_remove(&pwm->chip);
+}
+
+static struct platform_driver sun4i_pwm_driver = {
+	.driver = {
+		.name = "sun4i-pwm",
+		.of_match_table = sun4i_pwm_dt_ids,
+	},
+	.probe = sun4i_pwm_probe,
+	.remove = sun4i_pwm_remove,
+};
+module_platform_driver(sun4i_pwm_driver);
+
+MODULE_ALIAS("platform:sun4i-pwm");
+MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner sun4i PWM driver");
+MODULE_LICENSE("GPL v2");
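
The rounding comment in sun4i_pwm_config() is worth a concrete number: with
the bypassed prescaler the period register is clk_rate * period_ns /
NSEC_PER_SEC, which is rarely an integer. Adding NSEC_PER_SEC/2 before the
division rounds to nearest instead of truncating (plain C, assumed 24 MHz
clock):

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000ULL

	int main(void)
	{
		uint64_t clk_rate = 24000000, period_ns = 5025;
		uint64_t trunc = clk_rate * period_ns / NSEC_PER_SEC;
		uint64_t nearest = (clk_rate * period_ns + NSEC_PER_SEC / 2) /
				   NSEC_PER_SEC;

		/* 120.6 cycles: truncation gives 120, rounding gives 121 */
		printf("trunc=%llu nearest=%llu\n",
		       (unsigned long long)trunc, (unsigned long long)nearest);
		return 0;
	}
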
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 5b97cae5423a..cabd7d8e05cc 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -87,7 +87,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 	 * cycles at the PWM clock rate will take period_ns nanoseconds.
 	 */
 	rate = clk_get_rate(pc->clk) >> PWM_DUTY_WIDTH;
-	hz = 1000000000ul / period_ns;
+	hz = NSEC_PER_SEC / period_ns;
 
 	rate = (rate + (hz / 2)) / hz;
 
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index f64c5decb747..47295940a868 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -815,8 +815,7 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
 	return txd;
 }
 
-static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
-			     unsigned long arg)
+static int tsi721_terminate_all(struct dma_chan *dchan)
 {
 	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
 	struct tsi721_tx_desc *desc, *_d;
@@ -825,9 +824,6 @@ static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
 
 	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
 
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENOSYS;
-
 	spin_lock_bh(&bdma_chan->lock);
 
 	bdma_chan->active = false;
@@ -901,7 +897,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
 	mport->dma.device_tx_status = tsi721_tx_status;
 	mport->dma.device_issue_pending = tsi721_issue_pending;
 	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
-	mport->dma.device_control = tsi721_device_control;
+	mport->dma.device_terminate_all = tsi721_terminate_all;
 
 	err = dma_async_device_register(&mport->dma);
 	if (err)
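
The tsi721 conversion tracks the dmaengine rework that split the old
multiplexed device_control(cmd, arg) callback into per-operation hooks, so
the DMA_TERMINATE_ALL dispatch disappears. A sketch of the new wiring with
made-up driver names (not part of this patch):

	#include <linux/dmaengine.h>

	static int foo_terminate_all(struct dma_chan *chan)
	{
		/* quiesce hardware, fail pending descriptors, ... */
		return 0;
	}

	static void foo_setup(struct dma_device *dma)
	{
		/* was: dma->device_control = foo_device_control; */
		dma->device_terminate_all = foo_terminate_all;
	}
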
diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c
index e8647f7cf25e..00c5cc3d9546 100644
--- a/drivers/regulator/qcom_rpm-regulator.c
+++ b/drivers/regulator/qcom_rpm-regulator.c
@@ -205,6 +205,7 @@ static int rpm_reg_write(struct qcom_rpm_reg *vreg,
 	vreg->val[req->word] |= value << req->shift;
 
 	return qcom_rpm_write(vreg->rpm,
+			      QCOM_RPM_ACTIVE_STATE,
 			      vreg->resource,
 			      vreg->val,
 			      vreg->parts->request_len);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index cedb41c95dae..b5b5c3d485d6 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -65,7 +65,7 @@ config RTC_DEBUG
 comment "RTC interfaces"
 
 config RTC_INTF_SYSFS
-	boolean "/sys/class/rtc/rtcN (sysfs)"
+	bool "/sys/class/rtc/rtcN (sysfs)"
 	depends on SYSFS
 	default RTC_CLASS
 	help
@@ -75,7 +75,7 @@ config RTC_INTF_SYSFS
 	  If unsure, say Y.
 
 config RTC_INTF_PROC
-	boolean "/proc/driver/rtc (procfs for rtcN)"
+	bool "/proc/driver/rtc (procfs for rtcN)"
 	depends on PROC_FS
 	default RTC_CLASS
 	help
@@ -88,7 +88,7 @@ config RTC_INTF_PROC
 	  If unsure, say Y.
 
 config RTC_INTF_DEV
-	boolean "/dev/rtcN (character devices)"
+	bool "/dev/rtcN (character devices)"
 	default RTC_CLASS
 	help
 	  Say yes here if you want to use your RTCs using the /dev
@@ -466,7 +466,7 @@ config RTC_DRV_DM355EVM
 	  Supports the RTC firmware in the MSP430 on the DM355 EVM.
 
 config RTC_DRV_TWL92330
-	boolean "TI TWL92330/Menelaus"
+	bool "TI TWL92330/Menelaus"
 	depends on MENELAUS
 	help
 	  If you say yes here you get support for the RTC on the
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 8c3bfcb115b7..803869c7d7c2 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -399,21 +399,21 @@ ds1685_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 	 * of this RTC chip.  We check for it anyways in case support is
 	 * added in the future.
 	 */
-	if (unlikely((seconds >= 0xc0) && (seconds <= 0xff)))
+	if (unlikely(seconds >= 0xc0))
 		alrm->time.tm_sec = -1;
 	else
 		alrm->time.tm_sec = ds1685_rtc_bcd2bin(rtc, seconds,
 						       RTC_SECS_BCD_MASK,
 						       RTC_SECS_BIN_MASK);
 
-	if (unlikely((minutes >= 0xc0) && (minutes <= 0xff)))
+	if (unlikely(minutes >= 0xc0))
 		alrm->time.tm_min = -1;
 	else
 		alrm->time.tm_min = ds1685_rtc_bcd2bin(rtc, minutes,
 						       RTC_MINS_BCD_MASK,
 						       RTC_MINS_BIN_MASK);
 
-	if (unlikely((hours >= 0xc0) && (hours <= 0xff)))
+	if (unlikely(hours >= 0xc0))
 		alrm->time.tm_hour = -1;
 	else
 		alrm->time.tm_hour = ds1685_rtc_bcd2bin(rtc, hours,
@@ -472,13 +472,13 @@ ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 	 * field, and we only support four fields.  We put the support
 	 * here anyways for the future.
 	 */
-	if (unlikely((seconds >= 0xc0) && (seconds <= 0xff)))
+	if (unlikely(seconds >= 0xc0))
 		seconds = 0xff;
 
-	if (unlikely((minutes >= 0xc0) && (minutes <= 0xff)))
+	if (unlikely(minutes >= 0xc0))
 		minutes = 0xff;
 
-	if (unlikely((hours >= 0xc0) && (hours <= 0xff)))
+	if (unlikely(hours >= 0xc0))
 		hours = 0xff;
 
 	alrm->time.tm_mon = -1;
@@ -528,7 +528,6 @@ ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 /* ----------------------------------------------------------------------- */
 /* /dev/rtcX Interface functions */
 
-#ifdef CONFIG_RTC_INTF_DEV
 /**
  * ds1685_rtc_alarm_irq_enable - replaces ioctl() RTC_AIE on/off.
  * @dev: pointer to device structure.
@@ -557,7 +556,6 @@ ds1685_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 
 	return 0;
 }
-#endif
 /* ----------------------------------------------------------------------- */
 
 
@@ -1612,7 +1610,7 @@ ds1685_rtc_sysfs_time_regs_show(struct device *dev,
 		ds1685_rtc_sysfs_time_regs_lookup(attr->attr.name, false);
 
 	/* Make sure we actually matched something. */
-	if (!bcd_reg_info && !bin_reg_info)
+	if (!bcd_reg_info || !bin_reg_info)
 		return -EINVAL;
 
 	/* bcd_reg_info->reg == bin_reg_info->reg. */
@@ -1650,7 +1648,7 @@ ds1685_rtc_sysfs_time_regs_store(struct device *dev,
 		return -EINVAL;
 
 	/* Make sure we actually matched something. */
-	if (!bcd_reg_info && !bin_reg_info)
+	if (!bcd_reg_info || !bin_reg_info)
 		return -EINVAL;
 
 	/* Check for a valid range. */
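
The sysfs fix above is a logic inversion: the lookup must yield both a BCD
and a BIN descriptor, so the reject test needs ||, not && (the old test
only fired when both were missing and let a single NULL through). A tiny
demonstration in plain C:

	#include <stdio.h>

	int main(void)
	{
		void *bcd = (void *)1, *bin = (void *)0;	/* one lookup failed */

		printf("&&: %s\n", (!bcd && !bin) ? "-EINVAL" : "proceed (bug)");
		printf("||: %s\n", (!bcd || !bin) ? "-EINVAL" : "proceed");
		return 0;
	}
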
diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c
index aa3e2c7cd83c..a6f5ee80fadc 100644
--- a/drivers/scsi/am53c974.c
+++ b/drivers/scsi/am53c974.c
@@ -178,12 +178,6 @@ static void pci_esp_dma_drain(struct esp *esp)
 			break;
 		cpu_relax();
 	}
-	if (resid > 1) {
-		/* FIFO not cleared */
-		shost_printk(KERN_INFO, esp->host,
-			     "FIFO not cleared, %d bytes left\n",
-			     resid);
-	}
 
 	/*
 	 * When there is a residual BCMPLT will never be set
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 96241b20fd2c..a7cc61837818 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -585,7 +585,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
 			    "beiscsi_hba_alloc - iscsi_host_alloc failed\n");
 		return NULL;
 	}
-	shost->dma_boundary = pcidev->dma_mask;
 	shost->max_id = BE2_MAX_SESSIONS;
 	shost->max_channel = 0;
 	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 95d581c45413..a1cfbd3dda47 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -6831,10 +6831,8 @@ static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
 	char *name)
 {
 	struct workqueue_struct *wq = NULL;
-	char wq_name[20];
 
-	snprintf(wq_name, sizeof(wq_name), "%s_%d_hpsa", name, h->ctlr);
-	wq = alloc_ordered_workqueue(wq_name, 0);
+	wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
 	if (!wq)
 		dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
 
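
The hpsa cleanup leans on alloc_ordered_workqueue() accepting a printf
style format plus arguments, so the stack buffer and snprintf() become
redundant. A minimal sketch (hypothetical name and controller number):

	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq(int ctlr)
	{
		/* the workqueue core formats the name itself */
		return alloc_ordered_workqueue("%s_%d_hpsa", 0, "rescan", ctlr);
	}
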
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 73f9feecda72..99f43b7fc9ab 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1570,9 +1570,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
 	 * match the format by tcm_qla2xxx explict ConfigFS NodeACLs.
 	 */
 	memset(&port_name, 0, 36);
-	snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
-		fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
-		fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
+	snprintf(port_name, sizeof(port_name), "%8phC", fc_wwpn);
 	/*
 	 * Locate our struct se_node_acl either from an explict NodeACL created
 	 * via ConfigFS, or via running in TPG demo mode.
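
The qla2xxx hunk uses the kernel's %ph printf extension: a leading field
width gives the byte count and the C suffix selects colon separators, so
one "%8phC" replaces eight "%02x" conversions. Sketch:

	#include <linux/printk.h>

	static void show_wwpn(const unsigned char wwpn[8])
	{
		/* prints e.g. 21:00:00:24:ff:31:4c:48 */
		pr_info("wwpn: %8phC\n", wwpn);
	}
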
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0cbc1fb45f10..2270bd51f9c2 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -546,7 +546,7 @@ static ssize_t
 sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
 {
 	sg_io_hdr_t *hp = &srp->header;
-	int err = 0;
+	int err = 0, err2;
 	int len;
 
 	if (count < SZ_SG_IO_HDR) {
@@ -575,8 +575,8 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
 		goto err_out;
 	}
 err_out:
-	err = sg_finish_rem_req(srp);
-	return (0 == err) ? count : err;
+	err2 = sg_finish_rem_req(srp);
+	return err ? : err2 ? : count;
 }
 
 static ssize_t
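
The return statement above relies on GNU C's binary "?:": "err ? : err2 ? :
count" yields the first non-zero error and only falls through to the byte
count when both the read and the cleanup succeeded. Plain C (gcc/clang)
demonstration:

	#include <stdio.h>

	static long pick(long err, long err2, long count)
	{
		return err ? : err2 ? : count;
	}

	int main(void)
	{
		printf("%ld\n", pick(0, 0, 42));	/* 42: success path */
		printf("%ld\n", pick(-5, -9, 42));	/* -5: first error wins */
		printf("%ld\n", pick(0, -9, 42));	/* -9: cleanup error */
		return 0;
	}
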
@@ -1335,6 +1335,17 @@ sg_rq_end_io(struct request *rq, int uptodate)
 	}
 	/* Rely on write phase to clean out srp status values, so no "else" */
 
+	/*
+	 * Free the request as soon as it is complete so that its resources
+	 * can be reused without waiting for userspace to read() the
+	 * result.  But keep the associated bio (if any) around until
+	 * blk_rq_unmap_user() can be called from user context.
+	 */
+	srp->rq = NULL;
+	if (rq->cmd != rq->__cmd)
+		kfree(rq->cmd);
+	__blk_put_request(rq->q, rq);
+
 	write_lock_irqsave(&sfp->rq_list_lock, iflags);
 	if (unlikely(srp->orphan)) {
 		if (sfp->keep_orphan)
@@ -1669,7 +1680,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
 		return -ENOMEM;
 	}
 
-	rq = blk_get_request(q, rw, GFP_ATOMIC);
+	/*
+	 * NOTE
+	 *
+	 * With scsi-mq enabled, there are a fixed number of preallocated
+	 * requests equal in number to shost->can_queue.  If all of the
+	 * preallocated requests are already in use, then using GFP_ATOMIC with
+	 * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
+	 * will cause blk_get_request() to sleep until an active command
+	 * completes, freeing up a request.  Neither option is ideal, but
+	 * GFP_KERNEL is the better choice to prevent userspace from getting an
+	 * unexpected EWOULDBLOCK.
+	 *
+	 * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
+	 * does not sleep except under memory pressure.
+	 */
+	rq = blk_get_request(q, rw, GFP_KERNEL);
 	if (IS_ERR(rq)) {
 		kfree(long_cmdp);
 		return PTR_ERR(rq);
@@ -1759,10 +1785,10 @@ sg_finish_rem_req(Sg_request *srp)
 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
 				      "sg_finish_rem_req: res_used=%d\n",
 				      (int) srp->res_used));
-	if (srp->rq) {
-		if (srp->bio)
-			ret = blk_rq_unmap_user(srp->bio);
+	if (srp->bio)
+		ret = blk_rq_unmap_user(srp->bio);
 
+	if (srp->rq) {
 		if (srp->rq->cmd != srp->rq->__cmd)
 			kfree(srp->rq->cmd);
 		blk_put_request(srp->rq);
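
The NOTE block added above is the crux of the sg change: with scsi-mq the
request pool is fixed, GFP_ATOMIC fails fast with -EWOULDBLOCK when the
pool is exhausted, and GFP_KERNEL sleeps until a request frees up. A sketch
of the choice (illustrative helper, not in the patch; signature as in
kernels of this era):

	#include <linux/blkdev.h>

	static struct request *sg_get_rq(struct request_queue *q, int rw,
					 bool can_sleep)
	{
		/* sleeping allocation avoids surprising userspace with
		 * -EWOULDBLOCK when all preallocated requests are busy */
		return blk_get_request(q, rw, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
	}
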
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index c52bb5dfaedb..f164f24a4a55 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -950,6 +950,12 @@ static int virtscsi_probe(struct virtio_device *vdev)
 	u32 num_queues;
 	struct scsi_host_template *hostt;
 
+	if (!vdev->config->get) {
+		dev_err(&vdev->dev, "%s failure: config access disabled\n",
+			__func__);
+		return -EINVAL;
+	}
+
 	/* We need to know how many queues before we allocate. */
 	num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
 
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 7702664d7ed3..289ad016d925 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -870,6 +870,7 @@ fail_free_params:
 }
 
 static struct scsi_host_template wd719x_template = {
+	.module				= THIS_MODULE,
 	.name				= "Western Digital 719x",
 	.queuecommand			= wd719x_queuecommand,
 	.eh_abort_handler		= wd719x_abort,
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index f3ee439d6f0e..cd4c293f0dd0 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -81,7 +81,9 @@ static int __init sh_pm_runtime_init(void)
 	if (!of_machine_is_compatible("renesas,emev2") &&
 	    !of_machine_is_compatible("renesas,r7s72100") &&
 	    !of_machine_is_compatible("renesas,r8a73a4") &&
+#ifndef CONFIG_PM_GENERIC_DOMAINS_OF
 	    !of_machine_is_compatible("renesas,r8a7740") &&
+#endif
 	    !of_machine_is_compatible("renesas,r8a7778") &&
 	    !of_machine_is_compatible("renesas,r8a7779") &&
 	    !of_machine_is_compatible("renesas,r8a7790") &&
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 95ccedabba4f..ab8dfbef6f1b 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -29,7 +29,7 @@ menuconfig SPI
 if SPI
 
 config SPI_DEBUG
-	boolean "Debug support for SPI drivers"
+	bool "Debug support for SPI drivers"
 	depends on DEBUG_KERNEL
 	help
 	  Say "yes" to enable debug messaging (like dev_dbg and pr_debug),
@@ -40,8 +40,8 @@ config SPI_DEBUG
 #
 
 config SPI_MASTER
-#	boolean "SPI Master Support"
-	boolean
+#	bool "SPI Master Support"
+	bool
 	default SPI
 	help
 	  If your system has an master-capable SPI controller (which
diff --git a/drivers/staging/board/Kconfig b/drivers/staging/board/Kconfig
index 7eda0b8b7aab..0a89ad16371f 100644
--- a/drivers/staging/board/Kconfig
+++ b/drivers/staging/board/Kconfig
@@ -1,5 +1,5 @@
 config STAGING_BOARD
-	boolean "Staging Board Support"
+	bool "Staging Board Support"
 	depends on OF_ADDRESS
 	depends on BROKEN
 	help
diff --git a/drivers/staging/emxx_udc/Kconfig b/drivers/staging/emxx_udc/Kconfig
index 9bc6d3db86d9..cc3402020487 100644
--- a/drivers/staging/emxx_udc/Kconfig
+++ b/drivers/staging/emxx_udc/Kconfig
@@ -1,5 +1,5 @@
 config USB_EMXX
-	boolean "EMXX USB Function Device Controller"
+	bool "EMXX USB Function Device Controller"
 	depends on USB_GADGET && (ARCH_SHMOBILE || (ARM && COMPILE_TEST))
 	help
 	  The Emma Mobile series of SoCs from Renesas Electronics and
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index fa38be0982f9..24183028bd71 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -30,13 +30,13 @@ config IIO_SIMPLE_DUMMY
 if IIO_SIMPLE_DUMMY
 
 config IIO_SIMPLE_DUMMY_EVENTS
-	boolean "Event generation support"
+	bool "Event generation support"
 	select IIO_DUMMY_EVGEN
 	help
 	  Add some dummy events to the simple dummy driver.
 
 config IIO_SIMPLE_DUMMY_BUFFER
-	boolean "Buffered capture support"
+	bool "Buffered capture support"
 	select IIO_BUFFER
 	select IIO_KFIFO_BUF
 	help
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index 88614b71cf6d..ddf1fa9f67f8 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -270,7 +270,7 @@ void ll_invalidate_aliases(struct inode *inode)
 
 int ll_revalidate_it_finish(struct ptlrpc_request *request,
 			    struct lookup_intent *it,
-			    struct dentry *de)
+			    struct inode *inode)
 {
 	int rc = 0;
 
@@ -280,19 +280,17 @@ int ll_revalidate_it_finish(struct ptlrpc_request *request,
 	if (it_disposition(it, DISP_LOOKUP_NEG))
 		return -ENOENT;
 
-	rc = ll_prep_inode(&de->d_inode, request, NULL, it);
+	rc = ll_prep_inode(&inode, request, NULL, it);
 
 	return rc;
 }
 
-void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry)
+void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
 {
 	LASSERT(it != NULL);
-	LASSERT(dentry != NULL);
 
-	if (it->d.lustre.it_lock_mode && dentry->d_inode != NULL) {
-		struct inode *inode = dentry->d_inode;
-		struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
+	if (it->d.lustre.it_lock_mode && inode != NULL) {
+		struct ll_sb_info *sbi = ll_i2sbi(inode);
 
 		CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
 		       inode, inode->i_ino, inode->i_generation);
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 7c7ef7ec908e..5ebee6ca0a10 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -2912,8 +2912,8 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
 	oit.it_op = IT_LOOKUP;
 
 	/* Call getattr by fid, so do not provide name at all. */
-	op_data = ll_prep_md_op_data(NULL, dentry->d_inode,
-				     dentry->d_inode, NULL, 0, 0,
+	op_data = ll_prep_md_op_data(NULL, inode,
+				     inode, NULL, 0, 0,
 				     LUSTRE_OPC_ANY, NULL);
 	if (IS_ERR(op_data))
 		return PTR_ERR(op_data);
@@ -2931,7 +2931,7 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
 		goto out;
 	}
 
-	rc = ll_revalidate_it_finish(req, &oit, dentry);
+	rc = ll_revalidate_it_finish(req, &oit, inode);
 	if (rc != 0) {
 		ll_intent_release(&oit);
 		goto out;
@@ -2944,7 +2944,7 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
 		if (!dentry->d_inode->i_nlink)
 			d_lustre_invalidate(dentry, 0);
 
-		ll_lookup_finish_locks(&oit, dentry);
+		ll_lookup_finish_locks(&oit, inode);
 	} else if (!ll_have_md_lock(dentry->d_inode, &ibits, LCK_MINMODE)) {
 		struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
 		u64 valid = OBD_MD_FLGETATTR;
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index d032c2b086cc..2af1d7286250 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -786,9 +786,9 @@ extern const struct dentry_operations ll_d_ops;
 void ll_intent_drop_lock(struct lookup_intent *);
 void ll_intent_release(struct lookup_intent *);
 void ll_invalidate_aliases(struct inode *);
-void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry);
+void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode);
 int ll_revalidate_it_finish(struct ptlrpc_request *request,
-			    struct lookup_intent *it, struct dentry *de);
+			    struct lookup_intent *it, struct inode *inode);
 
 /* llite/llite_lib.c */
 extern struct super_operations lustre_super_operations;
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 4f361b77c749..890ac190f5fa 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -481,6 +481,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
 	struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
 	struct dentry *save = dentry, *retval;
 	struct ptlrpc_request *req = NULL;
+	struct inode *inode;
 	struct md_op_data *op_data;
 	__u32 opc;
 	int rc;
@@ -539,12 +540,13 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
 		goto out;
 	}
 
-	if ((it->it_op & IT_OPEN) && dentry->d_inode &&
-	    !S_ISREG(dentry->d_inode->i_mode) &&
-	    !S_ISDIR(dentry->d_inode->i_mode)) {
-		ll_release_openhandle(dentry->d_inode, it);
+	inode = dentry->d_inode;
+	if ((it->it_op & IT_OPEN) && inode &&
+	    !S_ISREG(inode->i_mode) &&
+	    !S_ISDIR(inode->i_mode)) {
+		ll_release_openhandle(inode, it);
 	}
-	ll_lookup_finish_locks(it, dentry);
+	ll_lookup_finish_locks(it, inode);
 
 	if (dentry == save)
 		retval = NULL;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index aebde3289c50..50bad55a0c42 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -30,7 +30,7 @@
 #include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_tq.h"
@@ -45,7 +45,7 @@
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
 #include "iscsi_target_device.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 
 #include <target/iscsi/iscsi_transport.h>
 
@@ -968,11 +968,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
 	conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
 	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
-		spin_lock_bh(&conn->sess->ttt_lock);
-		cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-		if (cmd->targ_xfer_tag == 0xFFFFFFFF)
-			cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-		spin_unlock_bh(&conn->sess->ttt_lock);
+		cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
 	} else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
 		cmd->targ_xfer_tag = 0xFFFFFFFF;
 	cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
@@ -1998,6 +1994,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
 	cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
 	cmd->data_direction = DMA_NONE;
+	cmd->text_in_ptr = NULL;
 
 	return 0;
 }
@@ -2011,9 +2008,13 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	int cmdsn_ret;
 
 	if (!text_in) {
-		pr_err("Unable to locate text_in buffer for sendtargets"
-		       " discovery\n");
-		goto reject;
+		cmd->targ_xfer_tag = be32_to_cpu(hdr->ttt);
+		if (cmd->targ_xfer_tag == 0xFFFFFFFF) {
+			pr_err("Unable to locate text_in buffer for sendtargets"
+			       " discovery\n");
+			goto reject;
+		}
+		goto empty_sendtargets;
 	}
 	if (strncmp("SendTargets", text_in, 11) != 0) {
 		pr_err("Received Text Data that is not"
@@ -2040,6 +2041,7 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 		spin_unlock_bh(&conn->cmd_lock);
 
+empty_sendtargets:
 	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
 
 	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
@@ -3047,11 +3049,7 @@ static int iscsit_send_r2t(
 	int_to_scsilun(cmd->se_cmd.orig_fe_lun,
 		       (struct scsi_lun *)&hdr->lun);
 	hdr->itt = cmd->init_task_tag;
-	spin_lock_bh(&conn->sess->ttt_lock);
-	r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-	if (r2t->targ_xfer_tag == 0xFFFFFFFF)
-		r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-	spin_unlock_bh(&conn->sess->ttt_lock);
+	r2t->targ_xfer_tag = session_get_next_ttt(conn->sess);
 	hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
 	hdr->statsn = cpu_to_be32(conn->stat_sn);
 	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
@@ -3393,7 +3391,8 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np)
 
 static int
 iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
-				  enum iscsit_transport_type network_transport)
+				  enum iscsit_transport_type network_transport,
+				  int skip_bytes, bool *completed)
 {
 	char *payload = NULL;
 	struct iscsi_conn *conn = cmd->conn;
@@ -3405,7 +3404,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
 	unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
 	unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
 
-	buffer_len = max(conn->conn_ops->MaxRecvDataSegmentLength,
+	buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength,
 			 SENDTARGETS_BUF_LIMIT);
 
 	payload = kzalloc(buffer_len, GFP_KERNEL);
@@ -3484,9 +3483,16 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
 					end_of_buf = 1;
 					goto eob;
 				}
-				memcpy(payload + payload_len, buf, len);
-				payload_len += len;
-				target_name_printed = 1;
+
+				if (skip_bytes && len <= skip_bytes) {
+					skip_bytes -= len;
+				} else {
+					memcpy(payload + payload_len, buf, len);
+					payload_len += len;
+					target_name_printed = 1;
+					if (len > skip_bytes)
+						skip_bytes = 0;
+				}
 			}
 
 			len = sprintf(buf, "TargetAddress="
@@ -3502,15 +3508,24 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
 					end_of_buf = 1;
 					goto eob;
 				}
-				memcpy(payload + payload_len, buf, len);
-				payload_len += len;
+
+				if (skip_bytes && len <= skip_bytes) {
+					skip_bytes -= len;
+				} else {
+					memcpy(payload + payload_len, buf, len);
+					payload_len += len;
+					if (len > skip_bytes)
+						skip_bytes = 0;
+				}
 			}
 			spin_unlock(&tpg->tpg_np_lock);
 		}
 		spin_unlock(&tiqn->tiqn_tpg_lock);
 eob:
-		if (end_of_buf)
+		if (end_of_buf) {
+			*completed = false;
 			break;
+		}
 
 		if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
 			break;
@@ -3528,13 +3543,23 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3528 enum iscsit_transport_type network_transport) 3543 enum iscsit_transport_type network_transport)
3529{ 3544{
3530 int text_length, padding; 3545 int text_length, padding;
3546 bool completed = true;
3531 3547
3532 text_length = iscsit_build_sendtargets_response(cmd, network_transport); 3548 text_length = iscsit_build_sendtargets_response(cmd, network_transport,
3549 cmd->read_data_done,
3550 &completed);
3533 if (text_length < 0) 3551 if (text_length < 0)
3534 return text_length; 3552 return text_length;
3535 3553
3554 if (completed) {
3555 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3556 } else {
3557 hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE;
3558 cmd->read_data_done += text_length;
3559 if (cmd->targ_xfer_tag == 0xFFFFFFFF)
3560 cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
3561 }
3536 hdr->opcode = ISCSI_OP_TEXT_RSP; 3562 hdr->opcode = ISCSI_OP_TEXT_RSP;
3537 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3538 padding = ((-text_length) & 3); 3563 padding = ((-text_length) & 3);
3539 hton24(hdr->dlength, text_length); 3564 hton24(hdr->dlength, text_length);
3540 hdr->itt = cmd->init_task_tag; 3565 hdr->itt = cmd->init_task_tag;
@@ -3543,21 +3568,25 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3543 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3568 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3544 3569
3545 iscsit_increment_maxcmdsn(cmd, conn->sess); 3570 iscsit_increment_maxcmdsn(cmd, conn->sess);
3571 /*
3572 * Reset maxcmdsn_inc in multi-part text payload exchanges to
3573 * correctly increment MaxCmdSN for each response answering a
3574 * non immediate text request with a valid CmdSN.
3575 */
3576 cmd->maxcmdsn_inc = 0;
3546 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3577 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3547 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3578 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3548 3579
3549 pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x," 3580 pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x,"
3550 " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn, 3581 " Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag,
3551 text_length, conn->cid); 3582 cmd->targ_xfer_tag, cmd->stat_sn, text_length, conn->cid,
3583 !!(hdr->flags & ISCSI_FLAG_CMD_FINAL),
3584 !!(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE));
3552 3585
3553 return text_length + padding; 3586 return text_length + padding;
3554} 3587}
3555EXPORT_SYMBOL(iscsit_build_text_rsp); 3588EXPORT_SYMBOL(iscsit_build_text_rsp);
3556 3589
3557/*
3558 * FIXME: Add support for F_BIT and C_BIT when the length is longer than
3559 * MaxRecvDataSegmentLength.
3560 */
3561static int iscsit_send_text_rsp( 3590static int iscsit_send_text_rsp(
3562 struct iscsi_cmd *cmd, 3591 struct iscsi_cmd *cmd,
3563 struct iscsi_conn *conn) 3592 struct iscsi_conn *conn)
@@ -4021,9 +4050,15 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
4021 ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf); 4050 ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
4022 break; 4051 break;
4023 case ISCSI_OP_TEXT: 4052 case ISCSI_OP_TEXT:
4024 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 4053 if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
4025 if (!cmd) 4054 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
4026 goto reject; 4055 if (!cmd)
4056 goto reject;
4057 } else {
4058 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
4059 if (!cmd)
4060 goto reject;
4061 }
4027 4062
4028 ret = iscsit_handle_text_cmd(conn, cmd, buf); 4063 ret = iscsit_handle_text_cmd(conn, cmd, buf);
4029 break; 4064 break;
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index ab4915c0d933..47e249dccb5f 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -22,7 +22,7 @@
 #include <linux/err.h>
 #include <linux/scatterlist.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_nego.h"
 #include "iscsi_target_auth.h"
 
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 9059c1e0b26e..48384b675e62 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -28,7 +28,7 @@
 #include <target/configfs_macros.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_erl0.h"
@@ -36,7 +36,7 @@
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 #include "iscsi_target_configfs.h"
 
 struct target_fabric_configfs *lio_target_fabric_configfs;
@@ -674,12 +674,9 @@ static ssize_t lio_target_nacl_show_info(
 	rb += sprintf(page+rb, "InitiatorAlias: %s\n",
 			sess->sess_ops->InitiatorAlias);
 
-	rb += sprintf(page+rb, "LIO Session ID: %u "
-		"ISID: 0x%02x %02x %02x %02x %02x %02x "
-		"TSIH: %hu ", sess->sid,
-		sess->isid[0], sess->isid[1], sess->isid[2],
-		sess->isid[3], sess->isid[4], sess->isid[5],
-		sess->tsih);
+	rb += sprintf(page+rb,
+		"LIO Session ID: %u ISID: 0x%6ph TSIH: %hu ",
+		sess->sid, sess->isid, sess->tsih);
 	rb += sprintf(page+rb, "SessionType: %s\n",
 		(sess->sess_ops->SessionType) ?
 		"Discovery" : "Normal");
@@ -1758,9 +1755,7 @@ static u32 lio_sess_get_initiator_sid(
 	/*
 	 * iSCSI Initiator Session Identifier from RFC-3720.
 	 */
-	return snprintf(buf, size, "%02x%02x%02x%02x%02x%02x",
-		sess->isid[0], sess->isid[1], sess->isid[2],
-		sess->isid[3], sess->isid[4], sess->isid[5]);
+	return snprintf(buf, size, "%6phN", sess->isid);
 }
 
 static int lio_queue_data_in(struct se_cmd *se_cmd)
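The ISID formatting above collapses six explicit %02x conversions into the kernel's %*ph printk extension, which hex-dumps a small buffer: %6ph prints six bytes space-separated, %6phN prints them with no separator. A minimal sketch with hypothetical values:

	u8 isid[6] = { 0x80, 0x01, 0x23, 0x45, 0x67, 0x89 };

	pr_info("ISID: 0x%6ph\n", isid);	/* "80 01 23 45 67 89" */
	pr_info("ISID: %6phN\n", isid);		/* "800123456789" */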
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
index e93d5a7a3f81..fb3b52b124ac 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.c
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -18,7 +18,7 @@
 
 #include <scsi/iscsi_proto.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_erl1.h"
 #include "iscsi_target_util.h"
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index 7087c736daa5..34c3cd1b05ce 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -21,7 +21,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index a0ae5fc0ad75..1c197bad6132 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -21,7 +21,8 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
+#include <target/iscsi/iscsi_transport.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_tq.h"
 #include "iscsi_target_erl0.h"
@@ -939,7 +940,8 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
 
 	if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
 		spin_unlock_bh(&conn->state_lock);
-		iscsit_close_connection(conn);
+		if (conn->conn_transport->transport_type == ISCSI_TCP)
+			iscsit_close_connection(conn);
 		return;
 	}
 
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index cda4d80cfaef..2e561deb30a2 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -22,7 +22,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_device.h"
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 4ca8fd2a70db..e24f1c7c5862 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -21,7 +21,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target_erl0.h"
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 713c0c1877ab..153fb66ac1b8 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -24,14 +24,14 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
+#include <target/iscsi/iscsi_target_stat.h>
 #include "iscsi_target_tq.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_nego.h"
 #include "iscsi_target_erl0.h"
 #include "iscsi_target_erl2.h"
 #include "iscsi_target_login.h"
-#include "iscsi_target_stat.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 62a095f36bf2..8c02fa34716f 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -22,7 +22,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_login.h"
 #include "iscsi_target_nego.h"
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index 16454a922e2b..208cca8a363c 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -18,7 +18,7 @@
 
 #include <target/target_core_base.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 18c29260b4a2..d4f9e9645697 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -18,7 +18,7 @@
 
 #include <linux/slab.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_util.h"
 #include "iscsi_target_parameters.h"
 
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
index ca41b583f2f6..e446a09c886b 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -20,7 +20,7 @@
 #include <linux/slab.h>
 #include <linux/random.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_util.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_seq_pdu_list.h"
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index 103395510307..5e1349a3b143 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -23,12 +23,12 @@
 #include <target/target_core_base.h>
 #include <target/configfs_macros.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 
 #ifndef INITIAL_JIFFIES
 #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index 78404b1cc0bf..b0224a77e26d 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -23,7 +23,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_device.h"
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 9053a3c0c6e5..bdd127c0e3ae 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -20,7 +20,7 @@
 #include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_erl0.h"
 #include "iscsi_target_login.h"
 #include "iscsi_target_nodeattrib.h"
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
index 601e9cc61e98..26aa50996473 100644
--- a/drivers/target/iscsi/iscsi_target_tq.c
+++ b/drivers/target/iscsi/iscsi_target_tq.c
@@ -20,40 +20,26 @@
 #include <linux/list.h>
 #include <linux/bitmap.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_tq.h"
 #include "iscsi_target.h"
 
-static LIST_HEAD(active_ts_list);
 static LIST_HEAD(inactive_ts_list);
-static DEFINE_SPINLOCK(active_ts_lock);
 static DEFINE_SPINLOCK(inactive_ts_lock);
 static DEFINE_SPINLOCK(ts_bitmap_lock);
 
-static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts)
-{
-	spin_lock(&active_ts_lock);
-	list_add_tail(&ts->ts_list, &active_ts_list);
-	iscsit_global->active_ts++;
-	spin_unlock(&active_ts_lock);
-}
-
 static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
 {
+	if (!list_empty(&ts->ts_list)) {
+		WARN_ON(1);
+		return;
+	}
 	spin_lock(&inactive_ts_lock);
 	list_add_tail(&ts->ts_list, &inactive_ts_list);
 	iscsit_global->inactive_ts++;
 	spin_unlock(&inactive_ts_lock);
 }
 
-static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts)
-{
-	spin_lock(&active_ts_lock);
-	list_del(&ts->ts_list);
-	iscsit_global->active_ts--;
-	spin_unlock(&active_ts_lock);
-}
-
 static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
 {
 	struct iscsi_thread_set *ts;
@@ -66,7 +52,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
 
 	ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
 
-	list_del(&ts->ts_list);
+	list_del_init(&ts->ts_list);
 	iscsit_global->inactive_ts--;
 	spin_unlock(&inactive_ts_lock);
 
@@ -204,8 +190,6 @@ static void iscsi_deallocate_extra_thread_sets(void)
 
 void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
 {
-	iscsi_add_ts_to_active_list(ts);
-
 	spin_lock_bh(&ts->ts_state_lock);
 	conn->thread_set = ts;
 	ts->conn = conn;
@@ -397,7 +381,6 @@ struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
 
 	if (ts->delay_inactive && (--ts->thread_count == 0)) {
 		spin_unlock_bh(&ts->ts_state_lock);
-		iscsi_del_ts_from_active_list(ts);
 
 		if (!iscsit_global->in_shutdown)
 			iscsi_deallocate_extra_thread_sets();
@@ -452,7 +435,6 @@ struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
 
 	if (ts->delay_inactive && (--ts->thread_count == 0)) {
 		spin_unlock_bh(&ts->ts_state_lock);
-		iscsi_del_ts_from_active_list(ts);
 
 		if (!iscsit_global->in_shutdown)
 			iscsi_deallocate_extra_thread_sets();
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index bcd88ec99793..390df8ed72b2 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -25,7 +25,7 @@
 #include <target/target_core_configfs.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
@@ -390,6 +390,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt(
 		init_task_tag, conn->cid);
 	return NULL;
 }
+EXPORT_SYMBOL(iscsit_find_cmd_from_itt);
 
 struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
 	struct iscsi_conn *conn,
@@ -939,13 +940,8 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
 	state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
 				ISTATE_SEND_NOPIN_NO_RESPONSE;
 	cmd->init_task_tag = RESERVED_ITT;
-	spin_lock_bh(&conn->sess->ttt_lock);
-	cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
-			0xFFFFFFFF;
-	if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
-		cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-	spin_unlock_bh(&conn->sess->ttt_lock);
-
+	cmd->targ_xfer_tag = (want_response) ?
+			     session_get_next_ttt(conn->sess) : 0xFFFFFFFF;
 	spin_lock_bh(&conn->cmd_lock);
 	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index a68508c4fec8..1ab754a671ff 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -16,7 +16,6 @@ extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
 extern int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 			       unsigned char * ,__be32 cmdsn);
 extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
-extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
 extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
 			itt_t, u32);
 extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index d836de200a03..44620fb6bd45 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -494,6 +494,11 @@ fd_execute_write_same(struct se_cmd *cmd)
 		target_complete_cmd(cmd, SAM_STAT_GOOD);
 		return 0;
 	}
+	if (cmd->prot_op) {
+		pr_err("WRITE_SAME: Protection information with FILEIO"
+		       " backends not supported\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
 	sg = &cmd->t_data_sg[0];
 
 	if (cmd->t_data_nents > 1 ||
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 78346b850968..d4a4b0fb444a 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -464,6 +464,11 @@ iblock_execute_write_same(struct se_cmd *cmd)
 	sector_t block_lba = cmd->t_task_lba;
 	sector_t sectors = sbc_get_write_same_sectors(cmd);
 
+	if (cmd->prot_op) {
+		pr_err("WRITE_SAME: Protection information with IBLOCK"
+		       " backends not supported\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
 	sg = &cmd->t_data_sg[0];
 
 	if (cmd->t_data_nents > 1 ||
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 283cf786ef98..2de6fb8cee8d 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1874,8 +1874,8 @@ static int core_scsi3_update_aptpl_buf(
 	}
 
 	if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
-		pr_err("Unable to update renaming"
-			" APTPL metadata\n");
+		pr_err("Unable to update renaming APTPL metadata,"
+		       " reallocating larger buffer\n");
 		ret = -EMSGSIZE;
 		goto out;
 	}
@@ -1892,8 +1892,8 @@ static int core_scsi3_update_aptpl_buf(
 			lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
 
 		if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
-			pr_err("Unable to update renaming"
-				" APTPL metadata\n");
+			pr_err("Unable to update renaming APTPL metadata,"
+			       " reallocating larger buffer\n");
 			ret = -EMSGSIZE;
 			goto out;
 		}
@@ -1956,7 +1956,7 @@ static int __core_scsi3_write_aptpl_to_file(
 static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl)
 {
 	unsigned char *buf;
-	int rc;
+	int rc, len = PR_APTPL_BUF_LEN;
 
 	if (!aptpl) {
 		char *null_buf = "No Registrations or Reservations\n";
@@ -1970,25 +1970,26 @@ static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, b
 
 		return 0;
 	}
-
-	buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL);
+retry:
+	buf = vzalloc(len);
 	if (!buf)
 		return TCM_OUT_OF_RESOURCES;
 
-	rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN);
+	rc = core_scsi3_update_aptpl_buf(dev, buf, len);
 	if (rc < 0) {
-		kfree(buf);
-		return TCM_OUT_OF_RESOURCES;
+		vfree(buf);
+		len *= 2;
+		goto retry;
 	}
 
 	rc = __core_scsi3_write_aptpl_to_file(dev, buf);
 	if (rc != 0) {
 		pr_err("SPC-3 PR: Could not update APTPL\n");
-		kfree(buf);
+		vfree(buf);
 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 	}
 	dev->t10_pr.pr_aptpl_active = 1;
-	kfree(buf);
+	vfree(buf);
 	pr_debug("SPC-3 PR: Set APTPL Bit Activated\n");
 	return 0;
 }
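The APTPL writer used to fail outright when the registration dump outgrew the fixed PR_APTPL_BUF_LEN allocation; it now doubles the buffer and retries, and switches from kzalloc() to vzalloc() since the grown buffer only needs to be virtually contiguous. The retry shape in isolation (a sketch using the names above):

	int len = PR_APTPL_BUF_LEN;
	unsigned char *buf;
retry:
	buf = vzalloc(len);
	if (!buf)
		return TCM_OUT_OF_RESOURCES;
	rc = core_scsi3_update_aptpl_buf(dev, buf, len);
	if (rc < 0) {		/* -EMSGSIZE: registrations did not fit */
		vfree(buf);
		len *= 2;	/* grow and try again */
		goto retry;
	}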
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index cd4bed7b2757..9a2f9d3a6e70 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -37,6 +37,9 @@
37#include "target_core_alua.h" 37#include "target_core_alua.h"
38 38
39static sense_reason_t 39static sense_reason_t
40sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
41
42static sense_reason_t
40sbc_emulate_readcapacity(struct se_cmd *cmd) 43sbc_emulate_readcapacity(struct se_cmd *cmd)
41{ 44{
42 struct se_device *dev = cmd->se_dev; 45 struct se_device *dev = cmd->se_dev;
@@ -251,7 +254,10 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
251static sense_reason_t 254static sense_reason_t
252sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops) 255sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
253{ 256{
257 struct se_device *dev = cmd->se_dev;
258 sector_t end_lba = dev->transport->get_blocks(dev) + 1;
254 unsigned int sectors = sbc_get_write_same_sectors(cmd); 259 unsigned int sectors = sbc_get_write_same_sectors(cmd);
260 sense_reason_t ret;
255 261
256 if ((flags[0] & 0x04) || (flags[0] & 0x02)) { 262 if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
257 pr_err("WRITE_SAME PBDATA and LBDATA" 263 pr_err("WRITE_SAME PBDATA and LBDATA"
@@ -264,6 +270,16 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
264 sectors, cmd->se_dev->dev_attrib.max_write_same_len); 270 sectors, cmd->se_dev->dev_attrib.max_write_same_len);
265 return TCM_INVALID_CDB_FIELD; 271 return TCM_INVALID_CDB_FIELD;
266 } 272 }
273 /*
274 * Sanity check for LBA wrap and request past end of device.
275 */
276 if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
277 ((cmd->t_task_lba + sectors) > end_lba)) {
278 pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
279 (unsigned long long)end_lba, cmd->t_task_lba, sectors);
280 return TCM_ADDRESS_OUT_OF_RANGE;
281 }
282
267 /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ 283 /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
268 if (flags[0] & 0x10) { 284 if (flags[0] & 0x10) {
269 pr_warn("WRITE SAME with ANCHOR not supported\n"); 285 pr_warn("WRITE SAME with ANCHOR not supported\n");
@@ -277,12 +293,21 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
277 if (!ops->execute_write_same_unmap) 293 if (!ops->execute_write_same_unmap)
278 return TCM_UNSUPPORTED_SCSI_OPCODE; 294 return TCM_UNSUPPORTED_SCSI_OPCODE;
279 295
296 if (!dev->dev_attrib.emulate_tpws) {
297 pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device"
298 " has emulate_tpws disabled\n");
299 return TCM_UNSUPPORTED_SCSI_OPCODE;
300 }
280 cmd->execute_cmd = ops->execute_write_same_unmap; 301 cmd->execute_cmd = ops->execute_write_same_unmap;
281 return 0; 302 return 0;
282 } 303 }
283 if (!ops->execute_write_same) 304 if (!ops->execute_write_same)
284 return TCM_UNSUPPORTED_SCSI_OPCODE; 305 return TCM_UNSUPPORTED_SCSI_OPCODE;
285 306
307 ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
308 if (ret)
309 return ret;
310
286 cmd->execute_cmd = ops->execute_write_same; 311 cmd->execute_cmd = ops->execute_write_same;
287 return 0; 312 return 0;
288} 313}
@@ -614,14 +639,21 @@ sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
614 return 0; 639 return 0;
615} 640}
616 641
617static bool 642static sense_reason_t
618sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, 643sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
619 u32 sectors, bool is_write) 644 u32 sectors, bool is_write)
620{ 645{
621 u8 protect = cdb[1] >> 5; 646 u8 protect = cdb[1] >> 5;
622 647
623 if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto) 648 if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
624 return true; 649 if (protect && !dev->dev_attrib.pi_prot_type) {
650 pr_err("CDB contains protect bit, but device does not"
651 " advertise PROTECT=1 feature bit\n");
652 return TCM_INVALID_CDB_FIELD;
653 }
654 if (cmd->prot_pto)
655 return TCM_NO_SENSE;
656 }
625 657
626 switch (dev->dev_attrib.pi_prot_type) { 658 switch (dev->dev_attrib.pi_prot_type) {
627 case TARGET_DIF_TYPE3_PROT: 659 case TARGET_DIF_TYPE3_PROT:
@@ -629,7 +661,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
629 break; 661 break;
630 case TARGET_DIF_TYPE2_PROT: 662 case TARGET_DIF_TYPE2_PROT:
631 if (protect) 663 if (protect)
632 return false; 664 return TCM_INVALID_CDB_FIELD;
633 665
634 cmd->reftag_seed = cmd->t_task_lba; 666 cmd->reftag_seed = cmd->t_task_lba;
635 break; 667 break;
@@ -638,12 +670,12 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
638 break; 670 break;
639 case TARGET_DIF_TYPE0_PROT: 671 case TARGET_DIF_TYPE0_PROT:
640 default: 672 default:
641 return true; 673 return TCM_NO_SENSE;
642 } 674 }
643 675
644 if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type, 676 if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
645 is_write, cmd)) 677 is_write, cmd))
646 return false; 678 return TCM_INVALID_CDB_FIELD;
647 679
648 cmd->prot_type = dev->dev_attrib.pi_prot_type; 680 cmd->prot_type = dev->dev_attrib.pi_prot_type;
649 cmd->prot_length = dev->prot_length * sectors; 681 cmd->prot_length = dev->prot_length * sectors;
@@ -662,7 +694,30 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
662 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length, 694 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
663 cmd->prot_op, cmd->prot_checks); 695 cmd->prot_op, cmd->prot_checks);
664 696
665 return true; 697 return TCM_NO_SENSE;
698}
699
700static int
701sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
702{
703 if (cdb[1] & 0x10) {
704 if (!dev->dev_attrib.emulate_dpo) {
705 pr_err("Got CDB: 0x%02x with DPO bit set, but device"
706 " does not advertise support for DPO\n", cdb[0]);
707 return -EINVAL;
708 }
709 }
710 if (cdb[1] & 0x8) {
711 if (!dev->dev_attrib.emulate_fua_write ||
712 !dev->dev_attrib.emulate_write_cache) {
713 pr_err("Got CDB: 0x%02x with FUA bit set, but device"
714 " does not advertise support for FUA write\n",
715 cdb[0]);
716 return -EINVAL;
717 }
718 cmd->se_cmd_flags |= SCF_FUA;
719 }
720 return 0;
666} 721}
667 722
668sense_reason_t 723sense_reason_t
@@ -686,8 +741,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
686 sectors = transport_get_sectors_10(cdb); 741 sectors = transport_get_sectors_10(cdb);
687 cmd->t_task_lba = transport_lba_32(cdb); 742 cmd->t_task_lba = transport_lba_32(cdb);
688 743
689 if (!sbc_check_prot(dev, cmd, cdb, sectors, false)) 744 if (sbc_check_dpofua(dev, cmd, cdb))
690 return TCM_UNSUPPORTED_SCSI_OPCODE; 745 return TCM_INVALID_CDB_FIELD;
746
747 ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
748 if (ret)
749 return ret;
691 750
692 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 751 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
693 cmd->execute_rw = ops->execute_rw; 752 cmd->execute_rw = ops->execute_rw;
@@ -697,8 +756,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
697 sectors = transport_get_sectors_12(cdb); 756 sectors = transport_get_sectors_12(cdb);
698 cmd->t_task_lba = transport_lba_32(cdb); 757 cmd->t_task_lba = transport_lba_32(cdb);
699 758
700 if (!sbc_check_prot(dev, cmd, cdb, sectors, false)) 759 if (sbc_check_dpofua(dev, cmd, cdb))
701 return TCM_UNSUPPORTED_SCSI_OPCODE; 760 return TCM_INVALID_CDB_FIELD;
761
762 ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
763 if (ret)
764 return ret;
702 765
703 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 766 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
704 cmd->execute_rw = ops->execute_rw; 767 cmd->execute_rw = ops->execute_rw;
@@ -708,8 +771,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
708 sectors = transport_get_sectors_16(cdb); 771 sectors = transport_get_sectors_16(cdb);
709 cmd->t_task_lba = transport_lba_64(cdb); 772 cmd->t_task_lba = transport_lba_64(cdb);
710 773
711 if (!sbc_check_prot(dev, cmd, cdb, sectors, false)) 774 if (sbc_check_dpofua(dev, cmd, cdb))
712 return TCM_UNSUPPORTED_SCSI_OPCODE; 775 return TCM_INVALID_CDB_FIELD;
776
777 ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
778 if (ret)
779 return ret;
713 780
714 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 781 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
715 cmd->execute_rw = ops->execute_rw; 782 cmd->execute_rw = ops->execute_rw;
@@ -727,11 +794,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
727 sectors = transport_get_sectors_10(cdb); 794 sectors = transport_get_sectors_10(cdb);
728 cmd->t_task_lba = transport_lba_32(cdb); 795 cmd->t_task_lba = transport_lba_32(cdb);
729 796
730 if (!sbc_check_prot(dev, cmd, cdb, sectors, true)) 797 if (sbc_check_dpofua(dev, cmd, cdb))
731 return TCM_UNSUPPORTED_SCSI_OPCODE; 798 return TCM_INVALID_CDB_FIELD;
799
800 ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
801 if (ret)
802 return ret;
732 803
733 if (cdb[1] & 0x8)
734 cmd->se_cmd_flags |= SCF_FUA;
735 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 804 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
736 cmd->execute_rw = ops->execute_rw; 805 cmd->execute_rw = ops->execute_rw;
737 cmd->execute_cmd = sbc_execute_rw; 806 cmd->execute_cmd = sbc_execute_rw;
@@ -740,11 +809,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
740 sectors = transport_get_sectors_12(cdb); 809 sectors = transport_get_sectors_12(cdb);
741 cmd->t_task_lba = transport_lba_32(cdb); 810 cmd->t_task_lba = transport_lba_32(cdb);
742 811
743 if (!sbc_check_prot(dev, cmd, cdb, sectors, true)) 812 if (sbc_check_dpofua(dev, cmd, cdb))
744 return TCM_UNSUPPORTED_SCSI_OPCODE; 813 return TCM_INVALID_CDB_FIELD;
814
815 ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
816 if (ret)
817 return ret;
745 818
746 if (cdb[1] & 0x8)
747 cmd->se_cmd_flags |= SCF_FUA;
748 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 819 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
749 cmd->execute_rw = ops->execute_rw; 820 cmd->execute_rw = ops->execute_rw;
750 cmd->execute_cmd = sbc_execute_rw; 821 cmd->execute_cmd = sbc_execute_rw;
@@ -753,11 +824,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
753 sectors = transport_get_sectors_16(cdb); 824 sectors = transport_get_sectors_16(cdb);
754 cmd->t_task_lba = transport_lba_64(cdb); 825 cmd->t_task_lba = transport_lba_64(cdb);
755 826
756 if (!sbc_check_prot(dev, cmd, cdb, sectors, true)) 827 if (sbc_check_dpofua(dev, cmd, cdb))
757 return TCM_UNSUPPORTED_SCSI_OPCODE; 828 return TCM_INVALID_CDB_FIELD;
829
830 ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
831 if (ret)
832 return ret;
758 833
759 if (cdb[1] & 0x8)
760 cmd->se_cmd_flags |= SCF_FUA;
761 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 834 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
762 cmd->execute_rw = ops->execute_rw; 835 cmd->execute_rw = ops->execute_rw;
763 cmd->execute_cmd = sbc_execute_rw; 836 cmd->execute_cmd = sbc_execute_rw;
@@ -768,6 +841,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
768 return TCM_INVALID_CDB_FIELD; 841 return TCM_INVALID_CDB_FIELD;
769 sectors = transport_get_sectors_10(cdb); 842 sectors = transport_get_sectors_10(cdb);
770 843
844 if (sbc_check_dpofua(dev, cmd, cdb))
845 return TCM_INVALID_CDB_FIELD;
846
771 cmd->t_task_lba = transport_lba_32(cdb); 847 cmd->t_task_lba = transport_lba_32(cdb);
772 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 848 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
773 849
@@ -777,8 +853,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
777 cmd->execute_rw = ops->execute_rw; 853 cmd->execute_rw = ops->execute_rw;
778 cmd->execute_cmd = sbc_execute_rw; 854 cmd->execute_cmd = sbc_execute_rw;
779 cmd->transport_complete_callback = &xdreadwrite_callback; 855 cmd->transport_complete_callback = &xdreadwrite_callback;
780 if (cdb[1] & 0x8)
781 cmd->se_cmd_flags |= SCF_FUA;
782 break; 856 break;
783 case VARIABLE_LENGTH_CMD: 857 case VARIABLE_LENGTH_CMD:
784 { 858 {
@@ -787,6 +861,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
787 case XDWRITEREAD_32: 861 case XDWRITEREAD_32:
788 sectors = transport_get_sectors_32(cdb); 862 sectors = transport_get_sectors_32(cdb);
789 863
864 if (sbc_check_dpofua(dev, cmd, cdb))
865 return TCM_INVALID_CDB_FIELD;
790 /* 866 /*
791 * Use WRITE_32 and READ_32 opcodes for the emulated 867 * Use WRITE_32 and READ_32 opcodes for the emulated
792 * XDWRITE_READ_32 logic. 868 * XDWRITE_READ_32 logic.
@@ -801,8 +877,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
801 cmd->execute_rw = ops->execute_rw; 877 cmd->execute_rw = ops->execute_rw;
802 cmd->execute_cmd = sbc_execute_rw; 878 cmd->execute_cmd = sbc_execute_rw;
803 cmd->transport_complete_callback = &xdreadwrite_callback; 879 cmd->transport_complete_callback = &xdreadwrite_callback;
804 if (cdb[1] & 0x8)
805 cmd->se_cmd_flags |= SCF_FUA;
806 break; 880 break;
807 case WRITE_SAME_32: 881 case WRITE_SAME_32:
808 sectors = transport_get_sectors_32(cdb); 882 sectors = transport_get_sectors_32(cdb);
@@ -888,6 +962,11 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
888 if (!ops->execute_unmap) 962 if (!ops->execute_unmap)
889 return TCM_UNSUPPORTED_SCSI_OPCODE; 963 return TCM_UNSUPPORTED_SCSI_OPCODE;
890 964
965 if (!dev->dev_attrib.emulate_tpu) {
966 pr_err("Got UNMAP, but backend device has"
967 " emulate_tpu disabled\n");
968 return TCM_UNSUPPORTED_SCSI_OPCODE;
969 }
891 size = get_unaligned_be16(&cdb[7]); 970 size = get_unaligned_be16(&cdb[7]);
892 cmd->execute_cmd = ops->execute_unmap; 971 cmd->execute_cmd = ops->execute_unmap;
893 break; 972 break;
@@ -955,7 +1034,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
955 unsigned long long end_lba; 1034 unsigned long long end_lba;
956check_lba: 1035check_lba:
957 end_lba = dev->transport->get_blocks(dev) + 1; 1036 end_lba = dev->transport->get_blocks(dev) + 1;
958 if (cmd->t_task_lba + sectors > end_lba) { 1037 if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
1038 ((cmd->t_task_lba + sectors) > end_lba)) {
959 pr_err("cmd exceeds last lba %llu " 1039 pr_err("cmd exceeds last lba %llu "
960 "(lba %llu, sectors %u)\n", 1040 "(lba %llu, sectors %u)\n",
961 end_lba, cmd->t_task_lba, sectors); 1041 end_lba, cmd->t_task_lba, sectors);
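The old bounds check missed 64-bit wraparound: with cmd->t_task_lba = 0xFFFFFFFFFFFFFFF0 and sectors = 0x20, the sum wraps to 0x10, which compares below end_lba and used to slip through. The added first clause catches exactly that case:

	/* (lba + sectors) < lba can only be true if the addition wrapped */
	if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||	/* wrapped */
	    ((cmd->t_task_lba + sectors) > end_lba))		/* past end of device */
		return TCM_ADDRESS_OUT_OF_RANGE;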
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 4c71657da56a..460e93109473 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -647,7 +647,7 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
 	 * support the use of the WRITE SAME (16) command to unmap LBAs.
 	 */
 	if (dev->dev_attrib.emulate_tpws != 0)
-		buf[5] |= 0x40;
+		buf[5] |= 0x40 | 0x20;
 
 	return 0;
 }
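For context: in the SBC-3 Logical Block Provisioning VPD page (0xB2), byte 5 carries the unmap-capability flags, where 0x40 should correspond to LBPWS (unmap via WRITE SAME (16)) and 0x20 to LBPWS10 (unmap via WRITE SAME (10)), so the change additionally advertises WRITE SAME (10) unmap support. Spelled out with illustrative macro names (not defined in the source):

	#define LBPWS	0x40	/* unmap supported via WRITE SAME (16) */
	#define LBPWS10	0x20	/* unmap supported via WRITE SAME (10) */

	if (dev->dev_attrib.emulate_tpws != 0)
		buf[5] |= LBPWS | LBPWS10;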
diff --git a/drivers/thermal/int340x_thermal/Makefile b/drivers/thermal/int340x_thermal/Makefile
index d4413698a85f..ba77a34f659f 100644
--- a/drivers/thermal/int340x_thermal/Makefile
+++ b/drivers/thermal/int340x_thermal/Makefile
@@ -1,4 +1,5 @@
 obj-$(CONFIG_INT340X_THERMAL)	+= int3400_thermal.o
+obj-$(CONFIG_INT340X_THERMAL)	+= int340x_thermal_zone.o
 obj-$(CONFIG_INT340X_THERMAL)	+= int3402_thermal.o
 obj-$(CONFIG_INT340X_THERMAL)	+= int3403_thermal.o
 obj-$(CONFIG_INT340X_THERMAL)	+= processor_thermal_device.o
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
index 65a98a97df07..031018e7a65b 100644
--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -18,19 +18,15 @@
 
 enum int3400_thermal_uuid {
 	INT3400_THERMAL_PASSIVE_1,
-	INT3400_THERMAL_PASSIVE_2,
 	INT3400_THERMAL_ACTIVE,
 	INT3400_THERMAL_CRITICAL,
-	INT3400_THERMAL_COOLING_MODE,
 	INT3400_THERMAL_MAXIMUM_UUID,
 };
 
 static u8 *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
 	"42A441D6-AE6A-462b-A84B-4A8CE79027D3",
-	"9E04115A-AE87-4D1C-9500-0F3E340BFE75",
 	"3A95C389-E4B8-4629-A526-C52C88626BAE",
 	"97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
-	"16CAF1B7-DD38-40ed-B1C1-1B8A1913D531",
 };
 
 struct int3400_thermal_priv {
@@ -266,13 +262,12 @@ static int int3400_thermal_probe(struct platform_device *pdev)
 	result = acpi_parse_art(priv->adev->handle, &priv->art_count,
 				&priv->arts, true);
 	if (result)
-		goto free_priv;
-
+		dev_dbg(&pdev->dev, "_ART table parsing error\n");
 
 	result = acpi_parse_trt(priv->adev->handle, &priv->trt_count,
 				&priv->trts, true);
 	if (result)
-		goto free_art;
+		dev_dbg(&pdev->dev, "_TRT table parsing error\n");
 
 	platform_set_drvdata(pdev, priv);
 
@@ -285,7 +280,7 @@ static int int3400_thermal_probe(struct platform_device *pdev)
 					&int3400_thermal_params, 0, 0);
 	if (IS_ERR(priv->thermal)) {
 		result = PTR_ERR(priv->thermal);
-		goto free_trt;
+		goto free_art_trt;
 	}
 
 	priv->rel_misc_dev_res = acpi_thermal_rel_misc_device_add(
@@ -299,9 +294,8 @@ static int int3400_thermal_probe(struct platform_device *pdev)
 
 free_zone:
 	thermal_zone_device_unregister(priv->thermal);
-free_trt:
+free_art_trt:
 	kfree(priv->trts);
-free_art:
 	kfree(priv->arts);
 free_priv:
 	kfree(priv);
diff --git a/drivers/thermal/int340x_thermal/int3402_thermal.c b/drivers/thermal/int340x_thermal/int3402_thermal.c
index c5cbc3af3a05..69df3d960303 100644
--- a/drivers/thermal/int340x_thermal/int3402_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3402_thermal.c
@@ -14,152 +14,39 @@
 #include <linux/platform_device.h>
 #include <linux/acpi.h>
 #include <linux/thermal.h>
+#include "int340x_thermal_zone.h"
 
-#define ACPI_ACTIVE_COOLING_MAX_NR 10
-
-struct active_trip {
-	unsigned long temp;
-	int id;
-	bool valid;
-};
+#define INT3402_PERF_CHANGED_EVENT	0x80
+#define INT3402_THERMAL_EVENT		0x90
 
 struct int3402_thermal_data {
-	unsigned long *aux_trips;
-	int aux_trip_nr;
-	unsigned long psv_temp;
-	int psv_trip_id;
-	unsigned long crt_temp;
-	int crt_trip_id;
-	unsigned long hot_temp;
-	int hot_trip_id;
-	struct active_trip act_trips[ACPI_ACTIVE_COOLING_MAX_NR];
 	acpi_handle *handle;
+	struct int34x_thermal_zone *int340x_zone;
 };
 
-static int int3402_thermal_get_zone_temp(struct thermal_zone_device *zone,
-					 unsigned long *temp)
-{
-	struct int3402_thermal_data *d = zone->devdata;
-	unsigned long long tmp;
-	acpi_status status;
-
-	status = acpi_evaluate_integer(d->handle, "_TMP", NULL, &tmp);
-	if (ACPI_FAILURE(status))
-		return -ENODEV;
-
-	/* _TMP returns the temperature in tenths of degrees Kelvin */
-	*temp = DECI_KELVIN_TO_MILLICELSIUS(tmp);
-
-	return 0;
-}
-
-static int int3402_thermal_get_trip_temp(struct thermal_zone_device *zone,
-					 int trip, unsigned long *temp)
+static void int3402_notify(acpi_handle handle, u32 event, void *data)
 {
-	struct int3402_thermal_data *d = zone->devdata;
-	int i;
-
-	if (trip < d->aux_trip_nr)
-		*temp = d->aux_trips[trip];
-	else if (trip == d->crt_trip_id)
-		*temp = d->crt_temp;
-	else if (trip == d->psv_trip_id)
-		*temp = d->psv_temp;
-	else if (trip == d->hot_trip_id)
-		*temp = d->hot_temp;
-	else {
-		for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) {
-			if (d->act_trips[i].valid &&
-			    d->act_trips[i].id == trip) {
-				*temp = d->act_trips[i].temp;
-				break;
-			}
-		}
-		if (i == ACPI_ACTIVE_COOLING_MAX_NR)
-			return -EINVAL;
+	struct int3402_thermal_data *priv = data;
+
+	if (!priv)
+		return;
+
+	switch (event) {
+	case INT3402_PERF_CHANGED_EVENT:
+		break;
+	case INT3402_THERMAL_EVENT:
+		int340x_thermal_zone_device_update(priv->int340x_zone);
+		break;
+	default:
+		break;
 	}
-	return 0;
-}
-
-static int int3402_thermal_get_trip_type(struct thermal_zone_device *zone,
-					 int trip, enum thermal_trip_type *type)
-{
-	struct int3402_thermal_data *d = zone->devdata;
-	int i;
-
-	if (trip < d->aux_trip_nr)
-		*type = THERMAL_TRIP_PASSIVE;
-	else if (trip == d->crt_trip_id)
-		*type = THERMAL_TRIP_CRITICAL;
-	else if (trip == d->hot_trip_id)
-		*type = THERMAL_TRIP_HOT;
-	else if (trip == d->psv_trip_id)
-		*type = THERMAL_TRIP_PASSIVE;
-	else {
-		for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) {
-			if (d->act_trips[i].valid &&
-			    d->act_trips[i].id == trip) {
-				*type = THERMAL_TRIP_ACTIVE;
-				break;
-			}
-		}
-		if (i == ACPI_ACTIVE_COOLING_MAX_NR)
-			return -EINVAL;
-	}
-	return 0;
-}
-
-static int int3402_thermal_set_trip_temp(struct thermal_zone_device *zone, int trip,
-					 unsigned long temp)
-{
-	struct int3402_thermal_data *d = zone->devdata;
-	acpi_status status;
-	char name[10];
-
-	snprintf(name, sizeof(name), "PAT%d", trip);
-	status = acpi_execute_simple_method(d->handle, name,
-			MILLICELSIUS_TO_DECI_KELVIN(temp));
-	if (ACPI_FAILURE(status))
-		return -EIO;
-
-	d->aux_trips[trip] = temp;
-	return 0;
-}
-
-static struct thermal_zone_device_ops int3402_thermal_zone_ops = {
-	.get_temp       = int3402_thermal_get_zone_temp,
-	.get_trip_temp  = int3402_thermal_get_trip_temp,
-	.get_trip_type  = int3402_thermal_get_trip_type,
-	.set_trip_temp  = int3402_thermal_set_trip_temp,
-};
-
-static struct thermal_zone_params int3402_thermal_params = {
-	.governor_name = "user_space",
-	.no_hwmon = true,
-};
-
-static int int3402_thermal_get_temp(acpi_handle handle, char *name,
-				    unsigned long *temp)
-{
-	unsigned long long r;
-	acpi_status status;
-
-	status = acpi_evaluate_integer(handle, name, NULL, &r);
-	if (ACPI_FAILURE(status))
-		return -EIO;
-
-	*temp = DECI_KELVIN_TO_MILLICELSIUS(r);
-	return 0;
 }
 
 static int int3402_thermal_probe(struct platform_device *pdev)
 {
 	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
 	struct int3402_thermal_data *d;
-	struct thermal_zone_device *zone;
-	acpi_status status;
-	unsigned long long trip_cnt;
-	int trip_mask = 0, i;
+	int ret;
 
 	if (!acpi_has_method(adev->handle, "_TMP"))
 		return -ENODEV;
@@ -168,54 +55,33 @@ static int int3402_thermal_probe(struct platform_device *pdev)
 	if (!d)
 		return -ENOMEM;
 
-	status = acpi_evaluate_integer(adev->handle, "PATC", NULL, &trip_cnt);
-	if (ACPI_FAILURE(status))
-		trip_cnt = 0;
-	else {
-		d->aux_trips = devm_kzalloc(&pdev->dev,
-				sizeof(*d->aux_trips) * trip_cnt, GFP_KERNEL);
-		if (!d->aux_trips)
-			return -ENOMEM;
-		trip_mask = trip_cnt - 1;
-		d->handle = adev->handle;
-		d->aux_trip_nr = trip_cnt;
-	}
-
-	d->crt_trip_id = -1;
-	if (!int3402_thermal_get_temp(adev->handle, "_CRT", &d->crt_temp))
-		d->crt_trip_id = trip_cnt++;
-	d->hot_trip_id = -1;
-	if (!int3402_thermal_get_temp(adev->handle, "_HOT", &d->hot_temp))
-		d->hot_trip_id = trip_cnt++;
-	d->psv_trip_id = -1;
-	if (!int3402_thermal_get_temp(adev->handle, "_PSV", &d->psv_temp))
-		d->psv_trip_id = trip_cnt++;
-	for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) {
-		char name[5] = { '_', 'A', 'C', '0' + i, '\0' };
-		if (int3402_thermal_get_temp(adev->handle, name,
-					     &d->act_trips[i].temp))
-			break;
-		d->act_trips[i].id = trip_cnt++;
-		d->act_trips[i].valid = true;
+	d->int340x_zone = int340x_thermal_zone_add(adev, NULL);
+	if (IS_ERR(d->int340x_zone))
+		return PTR_ERR(d->int340x_zone);
+
+	ret = acpi_install_notify_handler(adev->handle,
+					  ACPI_DEVICE_NOTIFY,
+					  int3402_notify,
+					  d);
+	if (ret) {
+		int340x_thermal_zone_remove(d->int340x_zone);
+		return ret;
 	}
 
-	zone = thermal_zone_device_register(acpi_device_bid(adev), trip_cnt,
-					    trip_mask, d,
-					    &int3402_thermal_zone_ops,
-					    &int3402_thermal_params,
-					    0, 0);
-	if (IS_ERR(zone))
-		return PTR_ERR(zone);
-	platform_set_drvdata(pdev, zone);
+	d->handle = adev->handle;
+	platform_set_drvdata(pdev, d);
 
 	return 0;
 }
 
 static int int3402_thermal_remove(struct platform_device *pdev)
 {
-	struct thermal_zone_device *zone = platform_get_drvdata(pdev);
+	struct int3402_thermal_data *d = platform_get_drvdata(pdev);
+
+	acpi_remove_notify_handler(d->handle,
+				   ACPI_DEVICE_NOTIFY, int3402_notify);
+	int340x_thermal_zone_remove(d->int340x_zone);
 
-	thermal_zone_device_unregister(zone);
 	return 0;
 }
 
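The int3402 driver now delegates all trip and temperature handling to the shared int340x_thermal_zone helper. The helper's interface, as inferred from the call sites above (the int340x_thermal_zone.h header itself is not part of this excerpt, so treat these prototypes as a sketch):

	/* inferred from usage; see int340x_thermal_zone.h for the real declarations */
	struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
					struct thermal_zone_device_ops *override_ops);
	void int340x_thermal_zone_remove(struct int34x_thermal_zone *zone);
	void int340x_thermal_zone_device_update(struct int34x_thermal_zone *zone);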
diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
index 0faf500d8a77..50a7a08e3a15 100644
--- a/drivers/thermal/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3403_thermal.c
@@ -19,6 +19,7 @@
19#include <linux/acpi.h> 19#include <linux/acpi.h>
20#include <linux/thermal.h> 20#include <linux/thermal.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include "int340x_thermal_zone.h"
22 23
23#define INT3403_TYPE_SENSOR 0x03 24#define INT3403_TYPE_SENSOR 0x03
24#define INT3403_TYPE_CHARGER 0x0B 25#define INT3403_TYPE_CHARGER 0x0B
@@ -26,18 +27,9 @@
 #define INT3403_PERF_CHANGED_EVENT	0x80
 #define INT3403_THERMAL_EVENT		0x90
 
-#define DECI_KELVIN_TO_MILLI_CELSIUS(t, off) (((t) - (off)) * 100)
-#define KELVIN_OFFSET	2732
-#define MILLI_CELSIUS_TO_DECI_KELVIN(t, off) (((t) / 100) + (off))
-
+/* Preserved structure for future expandability */
 struct int3403_sensor {
-	struct thermal_zone_device *tzone;
-	unsigned long *thresholds;
-	unsigned long crit_temp;
-	int crit_trip_id;
-	unsigned long psv_temp;
-	int psv_trip_id;
-
+	struct int34x_thermal_zone *int340x_zone;
 };
 
 struct int3403_performance_state {
@@ -63,126 +55,6 @@ struct int3403_priv {
 	void *priv;
 };
 
-static int sys_get_curr_temp(struct thermal_zone_device *tzone,
-			     unsigned long *temp)
-{
-	struct int3403_priv *priv = tzone->devdata;
-	struct acpi_device *device = priv->adev;
-	unsigned long long tmp;
-	acpi_status status;
-
-	status = acpi_evaluate_integer(device->handle, "_TMP", NULL, &tmp);
-	if (ACPI_FAILURE(status))
-		return -EIO;
-
-	*temp = DECI_KELVIN_TO_MILLI_CELSIUS(tmp, KELVIN_OFFSET);
-
-	return 0;
-}
-
-static int sys_get_trip_hyst(struct thermal_zone_device *tzone,
-		int trip, unsigned long *temp)
-{
-	struct int3403_priv *priv = tzone->devdata;
-	struct acpi_device *device = priv->adev;
-	unsigned long long hyst;
-	acpi_status status;
-
-	status = acpi_evaluate_integer(device->handle, "GTSH", NULL, &hyst);
-	if (ACPI_FAILURE(status))
-		return -EIO;
-
-	/*
-	 * Thermal hysteresis represents a temperature difference.
-	 * Kelvin and Celsius have same degree size. So the
-	 * conversion here between tenths of degree Kelvin unit
-	 * and Milli-Celsius unit is just to multiply 100.
-	 */
-	*temp = hyst * 100;
-
-	return 0;
-}
-
-static int sys_get_trip_temp(struct thermal_zone_device *tzone,
-		int trip, unsigned long *temp)
-{
-	struct int3403_priv *priv = tzone->devdata;
-	struct int3403_sensor *obj = priv->priv;
-
-	if (priv->type != INT3403_TYPE_SENSOR || !obj)
-		return -EINVAL;
-
-	if (trip == obj->crit_trip_id)
-		*temp = obj->crit_temp;
-	else if (trip == obj->psv_trip_id)
-		*temp = obj->psv_temp;
-	else {
-		/*
-		 * get_trip_temp is a mandatory callback but
-		 * PATx method doesn't return any value, so return
-		 * cached value, which was last set from user space
-		 */
-		*temp = obj->thresholds[trip];
-	}
-
-	return 0;
-}
-
-static int sys_get_trip_type(struct thermal_zone_device *thermal,
-		int trip, enum thermal_trip_type *type)
-{
-	struct int3403_priv *priv = thermal->devdata;
-	struct int3403_sensor *obj = priv->priv;
-
-	/* Mandatory callback, may not mean much here */
-	if (trip == obj->crit_trip_id)
-		*type = THERMAL_TRIP_CRITICAL;
-	else
-		*type = THERMAL_TRIP_PASSIVE;
-
-	return 0;
-}
-
-int sys_set_trip_temp(struct thermal_zone_device *tzone, int trip,
-		      unsigned long temp)
-{
-	struct int3403_priv *priv = tzone->devdata;
-	struct acpi_device *device = priv->adev;
-	struct int3403_sensor *obj = priv->priv;
-	acpi_status status;
-	char name[10];
-	int ret = 0;
-
-	snprintf(name, sizeof(name), "PAT%d", trip);
-	if (acpi_has_method(device->handle, name)) {
-		status = acpi_execute_simple_method(device->handle, name,
-				MILLI_CELSIUS_TO_DECI_KELVIN(temp,
-							KELVIN_OFFSET));
-		if (ACPI_FAILURE(status))
-			ret = -EIO;
-		else
-			obj->thresholds[trip] = temp;
-	} else {
-		ret = -EIO;
-		dev_err(&device->dev, "sys_set_trip_temp: method not found\n");
-	}
-
-	return ret;
-}
-
-static struct thermal_zone_device_ops tzone_ops = {
-	.get_temp = sys_get_curr_temp,
-	.get_trip_temp = sys_get_trip_temp,
-	.get_trip_type = sys_get_trip_type,
-	.set_trip_temp = sys_set_trip_temp,
-	.get_trip_hyst = sys_get_trip_hyst,
-};
-
-static struct thermal_zone_params int3403_thermal_params = {
-	.governor_name = "user_space",
-	.no_hwmon = true,
-};
-
 static void int3403_notify(acpi_handle handle,
 			   u32 event, void *data)
 {
@@ -200,7 +72,7 @@ static void int3403_notify(acpi_handle handle,
 	case INT3403_PERF_CHANGED_EVENT:
 		break;
 	case INT3403_THERMAL_EVENT:
-		thermal_zone_device_update(obj->tzone);
+		int340x_thermal_zone_device_update(obj->int340x_zone);
 		break;
 	default:
 		dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event);
@@ -208,41 +80,10 @@ static void int3403_notify(acpi_handle handle,
 	}
 }
 
-static int sys_get_trip_crt(struct acpi_device *device, unsigned long *temp)
-{
-	unsigned long long crt;
-	acpi_status status;
-
-	status = acpi_evaluate_integer(device->handle, "_CRT", NULL, &crt);
-	if (ACPI_FAILURE(status))
-		return -EIO;
-
-	*temp = DECI_KELVIN_TO_MILLI_CELSIUS(crt, KELVIN_OFFSET);
-
-	return 0;
-}
-
-static int sys_get_trip_psv(struct acpi_device *device, unsigned long *temp)
-{
-	unsigned long long psv;
-	acpi_status status;
-
-	status = acpi_evaluate_integer(device->handle, "_PSV", NULL, &psv);
-	if (ACPI_FAILURE(status))
-		return -EIO;
-
-	*temp = DECI_KELVIN_TO_MILLI_CELSIUS(psv, KELVIN_OFFSET);
-
-	return 0;
-}
-
 static int int3403_sensor_add(struct int3403_priv *priv)
 {
 	int result = 0;
-	acpi_status status;
 	struct int3403_sensor *obj;
-	unsigned long long trip_cnt;
-	int trip_mask = 0;
 
 	obj = devm_kzalloc(&priv->pdev->dev, sizeof(*obj), GFP_KERNEL);
 	if (!obj)
@@ -250,39 +91,9 @@ static int int3403_sensor_add(struct int3403_priv *priv)
 
 	priv->priv = obj;
 
-	status = acpi_evaluate_integer(priv->adev->handle, "PATC", NULL,
-			&trip_cnt);
-	if (ACPI_FAILURE(status))
-		trip_cnt = 0;
-
-	if (trip_cnt) {
-		/* We have to cache, thresholds can't be readback */
-		obj->thresholds = devm_kzalloc(&priv->pdev->dev,
-				sizeof(*obj->thresholds) * trip_cnt,
-				GFP_KERNEL);
-		if (!obj->thresholds) {
-			result = -ENOMEM;
-			goto err_free_obj;
-		}
-		trip_mask = BIT(trip_cnt) - 1;
-	}
-
-	obj->psv_trip_id = -1;
-	if (!sys_get_trip_psv(priv->adev, &obj->psv_temp))
-		obj->psv_trip_id = trip_cnt++;
-
-	obj->crit_trip_id = -1;
-	if (!sys_get_trip_crt(priv->adev, &obj->crit_temp))
-		obj->crit_trip_id = trip_cnt++;
-
-	obj->tzone = thermal_zone_device_register(acpi_device_bid(priv->adev),
-				trip_cnt, trip_mask, priv, &tzone_ops,
-				&int3403_thermal_params, 0, 0);
-	if (IS_ERR(obj->tzone)) {
-		result = PTR_ERR(obj->tzone);
-		obj->tzone = NULL;
-		goto err_free_obj;
-	}
+	obj->int340x_zone = int340x_thermal_zone_add(priv->adev, NULL);
+	if (IS_ERR(obj->int340x_zone))
+		return PTR_ERR(obj->int340x_zone);
 
 	result = acpi_install_notify_handler(priv->adev->handle,
 					ACPI_DEVICE_NOTIFY, int3403_notify,
@@ -293,7 +104,7 @@ static int int3403_sensor_add(struct int3403_priv *priv)
 	return 0;
 
 err_free_obj:
-	thermal_zone_device_unregister(obj->tzone);
+	int340x_thermal_zone_remove(obj->int340x_zone);
 	return result;
 }
 
@@ -303,7 +114,8 @@ static int int3403_sensor_remove(struct int3403_priv *priv)
 
 	acpi_remove_notify_handler(priv->adev->handle,
 				   ACPI_DEVICE_NOTIFY, int3403_notify);
-	thermal_zone_device_unregister(obj->tzone);
+	int340x_thermal_zone_remove(obj->int340x_zone);
+
 	return 0;
 }
 
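Both conversions above reduce int3402 and int3403 to thin wrappers around the shared zone code introduced in the new file that follows. As a rough sketch of the resulting client pattern (the int34xx_example names are hypothetical, shown only to illustrate the new API):

	/* Hedged sketch, not from the patch: a minimal INT340x client. */
	static int int34xx_example_probe(struct platform_device *pdev)
	{
		struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
		struct int34x_thermal_zone *zone;

		/* NULL override_ops selects the common ACPI-backed callbacks */
		zone = int340x_thermal_zone_add(adev, NULL);
		if (IS_ERR(zone))
			return PTR_ERR(zone);

		platform_set_drvdata(pdev, zone);
		return 0;
	}

	static int int34xx_example_remove(struct platform_device *pdev)
	{
		int340x_thermal_zone_remove(platform_get_drvdata(pdev));
		return 0;
	}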
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
new file mode 100644
index 000000000000..f88b08877025
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
@@ -0,0 +1,276 @@
+/*
+ * int340x_thermal_zone.c
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/thermal.h>
+#include "int340x_thermal_zone.h"
+
+static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
+					 unsigned long *temp)
+{
+	struct int34x_thermal_zone *d = zone->devdata;
+	unsigned long long tmp;
+	acpi_status status;
+
+	if (d->override_ops && d->override_ops->get_temp)
+		return d->override_ops->get_temp(zone, temp);
+
+	status = acpi_evaluate_integer(d->adev->handle, "_TMP", NULL, &tmp);
+	if (ACPI_FAILURE(status))
+		return -EIO;
+
+	if (d->lpat_table) {
+		int conv_temp;
+
+		conv_temp = acpi_lpat_raw_to_temp(d->lpat_table, (int)tmp);
+		if (conv_temp < 0)
+			return conv_temp;
+
+		*temp = (unsigned long)conv_temp * 10;
+	} else
+		/* _TMP returns the temperature in tenths of degrees Kelvin */
+		*temp = DECI_KELVIN_TO_MILLICELSIUS(tmp);
+
+	return 0;
+}
+
+static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
+					 int trip, unsigned long *temp)
+{
+	struct int34x_thermal_zone *d = zone->devdata;
+	int i;
+
+	if (d->override_ops && d->override_ops->get_trip_temp)
+		return d->override_ops->get_trip_temp(zone, trip, temp);
+
+	if (trip < d->aux_trip_nr)
+		*temp = d->aux_trips[trip];
+	else if (trip == d->crt_trip_id)
+		*temp = d->crt_temp;
+	else if (trip == d->psv_trip_id)
+		*temp = d->psv_temp;
+	else if (trip == d->hot_trip_id)
+		*temp = d->hot_temp;
+	else {
+		for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
+			if (d->act_trips[i].valid &&
+			    d->act_trips[i].id == trip) {
+				*temp = d->act_trips[i].temp;
+				break;
+			}
+		}
+		if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
+					 int trip,
+					 enum thermal_trip_type *type)
+{
+	struct int34x_thermal_zone *d = zone->devdata;
+	int i;
+
+	if (d->override_ops && d->override_ops->get_trip_type)
+		return d->override_ops->get_trip_type(zone, trip, type);
+
+	if (trip < d->aux_trip_nr)
+		*type = THERMAL_TRIP_PASSIVE;
+	else if (trip == d->crt_trip_id)
+		*type = THERMAL_TRIP_CRITICAL;
+	else if (trip == d->hot_trip_id)
+		*type = THERMAL_TRIP_HOT;
+	else if (trip == d->psv_trip_id)
+		*type = THERMAL_TRIP_PASSIVE;
+	else {
+		for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
+			if (d->act_trips[i].valid &&
+			    d->act_trips[i].id == trip) {
+				*type = THERMAL_TRIP_ACTIVE;
+				break;
+			}
+		}
+		if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
+					 int trip, unsigned long temp)
+{
+	struct int34x_thermal_zone *d = zone->devdata;
+	acpi_status status;
+	char name[10];
+
+	if (d->override_ops && d->override_ops->set_trip_temp)
+		return d->override_ops->set_trip_temp(zone, trip, temp);
+
+	snprintf(name, sizeof(name), "PAT%d", trip);
+	status = acpi_execute_simple_method(d->adev->handle, name,
+			MILLICELSIUS_TO_DECI_KELVIN(temp));
+	if (ACPI_FAILURE(status))
+		return -EIO;
+
+	d->aux_trips[trip] = temp;
+
+	return 0;
+}
+
+
+static int int340x_thermal_get_trip_hyst(struct thermal_zone_device *zone,
+					 int trip, unsigned long *temp)
+{
+	struct int34x_thermal_zone *d = zone->devdata;
+	acpi_status status;
+	unsigned long long hyst;
+
+	if (d->override_ops && d->override_ops->get_trip_hyst)
+		return d->override_ops->get_trip_hyst(zone, trip, temp);
+
+	status = acpi_evaluate_integer(d->adev->handle, "GTSH", NULL, &hyst);
+	if (ACPI_FAILURE(status))
+		return -EIO;
+
+	*temp = hyst * 100;
+
+	return 0;
+}
+
+static struct thermal_zone_device_ops int340x_thermal_zone_ops = {
+	.get_temp	= int340x_thermal_get_zone_temp,
+	.get_trip_temp	= int340x_thermal_get_trip_temp,
+	.get_trip_type	= int340x_thermal_get_trip_type,
+	.set_trip_temp	= int340x_thermal_set_trip_temp,
+	.get_trip_hyst	= int340x_thermal_get_trip_hyst,
+};
+
+static int int340x_thermal_get_trip_config(acpi_handle handle, char *name,
+					   unsigned long *temp)
+{
+	unsigned long long r;
+	acpi_status status;
+
+	status = acpi_evaluate_integer(handle, name, NULL, &r);
+	if (ACPI_FAILURE(status))
+		return -EIO;
+
+	*temp = DECI_KELVIN_TO_MILLICELSIUS(r);
+
+	return 0;
+}
+
+static struct thermal_zone_params int340x_thermal_params = {
+	.governor_name = "user_space",
+	.no_hwmon = true,
+};
+
+struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
+				struct thermal_zone_device_ops *override_ops)
+{
+	struct int34x_thermal_zone *int34x_thermal_zone;
+	acpi_status status;
+	unsigned long long trip_cnt;
+	int trip_mask = 0, i;
+	int ret;
+
+	int34x_thermal_zone = kzalloc(sizeof(*int34x_thermal_zone),
+				      GFP_KERNEL);
+	if (!int34x_thermal_zone)
+		return ERR_PTR(-ENOMEM);
+
+	int34x_thermal_zone->adev = adev;
+	int34x_thermal_zone->override_ops = override_ops;
+
+	status = acpi_evaluate_integer(adev->handle, "PATC", NULL, &trip_cnt);
+	if (ACPI_FAILURE(status))
+		trip_cnt = 0;
+	else {
+		int34x_thermal_zone->aux_trips = kzalloc(
+				sizeof(*int34x_thermal_zone->aux_trips) *
+				trip_cnt, GFP_KERNEL);
+		if (!int34x_thermal_zone->aux_trips) {
+			ret = -ENOMEM;
+			goto free_mem;
+		}
+		trip_mask = BIT(trip_cnt) - 1;
+		int34x_thermal_zone->aux_trip_nr = trip_cnt;
+	}
+
+	int34x_thermal_zone->crt_trip_id = -1;
+	if (!int340x_thermal_get_trip_config(adev->handle, "_CRT",
+					     &int34x_thermal_zone->crt_temp))
+		int34x_thermal_zone->crt_trip_id = trip_cnt++;
+	int34x_thermal_zone->hot_trip_id = -1;
+	if (!int340x_thermal_get_trip_config(adev->handle, "_HOT",
+					     &int34x_thermal_zone->hot_temp))
+		int34x_thermal_zone->hot_trip_id = trip_cnt++;
+	int34x_thermal_zone->psv_trip_id = -1;
+	if (!int340x_thermal_get_trip_config(adev->handle, "_PSV",
+					     &int34x_thermal_zone->psv_temp))
+		int34x_thermal_zone->psv_trip_id = trip_cnt++;
+	for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
+		char name[5] = { '_', 'A', 'C', '0' + i, '\0' };
+
+		if (int340x_thermal_get_trip_config(adev->handle, name,
+				&int34x_thermal_zone->act_trips[i].temp))
+			break;
+
+		int34x_thermal_zone->act_trips[i].id = trip_cnt++;
+		int34x_thermal_zone->act_trips[i].valid = true;
+	}
+	int34x_thermal_zone->lpat_table = acpi_lpat_get_conversion_table(
+								adev->handle);
+
+	int34x_thermal_zone->zone = thermal_zone_device_register(
+						acpi_device_bid(adev),
+						trip_cnt,
+						trip_mask, int34x_thermal_zone,
+						&int340x_thermal_zone_ops,
+						&int340x_thermal_params,
+						0, 0);
+	if (IS_ERR(int34x_thermal_zone->zone)) {
+		ret = PTR_ERR(int34x_thermal_zone->zone);
+		goto free_lpat;
+	}
+
+	return int34x_thermal_zone;
+
+free_lpat:
+	acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+free_mem:
+	kfree(int34x_thermal_zone);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(int340x_thermal_zone_add);
+
+void int340x_thermal_zone_remove(struct int34x_thermal_zone
+				 *int34x_thermal_zone)
+{
+	thermal_zone_device_unregister(int34x_thermal_zone->zone);
+	acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+	kfree(int34x_thermal_zone);
+}
+EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
+
+MODULE_AUTHOR("Aaron Lu <aaron.lu@intel.com>");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_DESCRIPTION("Intel INT340x common thermal zone handler");
+MODULE_LICENSE("GPL v2");
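int340x_thermal_get_zone_temp() and int340x_thermal_get_trip_config() above convert _TMP-style values from tenths of a degree Kelvin to millidegrees Celsius. Judging by the per-driver DECI_KELVIN_TO_MILLI_CELSIUS/KELVIN_OFFSET macros deleted from int3403_thermal.c earlier in this patch, the shared DECI_KELVIN_TO_MILLICELSIUS() performs the same arithmetic with the 2732 offset baked in:

	/* Assumed expansion, mirroring the deleted per-driver macro:
	 *   DECI_KELVIN_TO_MILLICELSIUS(t)  ==  ((t) - 2732) * 100
	 * Worked example: _TMP == 3032 deci-Kelvin (303.2 K)
	 *   (3032 - 2732) * 100 = 30000 m°C, i.e. 30 °C
	 */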
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
new file mode 100644
index 000000000000..9f38ab72c4bf
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
@@ -0,0 +1,68 @@
+/*
+ * int340x_thermal_zone.h
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __INT340X_THERMAL_ZONE_H__
+#define __INT340X_THERMAL_ZONE_H__
+
+#include <acpi/acpi_lpat.h>
+
+#define INT340X_THERMAL_MAX_ACT_TRIP_COUNT	10
+
+struct active_trip {
+	unsigned long temp;
+	int id;
+	bool valid;
+};
+
+struct int34x_thermal_zone {
+	struct acpi_device *adev;
+	struct active_trip act_trips[INT340X_THERMAL_MAX_ACT_TRIP_COUNT];
+	unsigned long *aux_trips;
+	int aux_trip_nr;
+	unsigned long psv_temp;
+	int psv_trip_id;
+	unsigned long crt_temp;
+	int crt_trip_id;
+	unsigned long hot_temp;
+	int hot_trip_id;
+	struct thermal_zone_device *zone;
+	struct thermal_zone_device_ops *override_ops;
+	void *priv_data;
+	struct acpi_lpat_conversion_table *lpat_table;
+};
+
+struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *,
+				struct thermal_zone_device_ops *override_ops);
+void int340x_thermal_zone_remove(struct int34x_thermal_zone *);
+
+static inline void int340x_thermal_zone_set_priv_data(
+			struct int34x_thermal_zone *tzone, void *priv_data)
+{
+	tzone->priv_data = priv_data;
+}
+
+static inline void *int340x_thermal_zone_get_priv_data(
+			struct int34x_thermal_zone *tzone)
+{
+	return tzone->priv_data;
+}
+
+static inline void int340x_thermal_zone_device_update(
+			struct int34x_thermal_zone *tzone)
+{
+	thermal_zone_device_update(tzone->zone);
+}
+
+#endif
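One detail of int340x_thermal_zone_add() worth calling out: trip_mask = BIT(trip_cnt) - 1 marks exactly the PATC-reported auxiliary trips as writable, while the _CRT/_HOT/_PSV/_ACx trips appended after them stay read-only. For example:

	/* trip_cnt == 3 (from PATC)  ->  trip_mask == BIT(3) - 1 == 0b111
	 * trips 0..2: aux trips, settable from user space via PAT0..PAT2
	 * trips 3.. : _CRT/_HOT/_PSV/_ACx, mask bit clear -> read-only
	 */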
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
index 0fe5dbbea968..5e8d8e91ea6d 100644
--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -18,6 +18,8 @@
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/acpi.h>
+#include <linux/thermal.h>
+#include "int340x_thermal_zone.h"
 
 /* Broadwell-U/HSB thermal reporting device */
 #define PCI_DEVICE_ID_PROC_BDW_THERMAL	0x1603
@@ -39,6 +41,7 @@ struct proc_thermal_device {
 	struct device *dev;
 	struct acpi_device *adev;
 	struct power_config power_limits[2];
+	struct int34x_thermal_zone *int340x_zone;
 };
 
 enum proc_thermal_emum_mode_type {
@@ -117,6 +120,72 @@ static struct attribute_group power_limit_attribute_group = {
 	.name = "power_limits"
 };
 
+static int stored_tjmax; /* since it is fixed, we can have local storage */
+
+static int get_tjmax(void)
+{
+	u32 eax, edx;
+	u32 val;
+	int err;
+
+	err = rdmsr_safe(MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
+	if (err)
+		return err;
+
+	val = (eax >> 16) & 0xff;
+	if (val)
+		return val;
+
+	return -EINVAL;
+}
+
+static int read_temp_msr(unsigned long *temp)
+{
+	int cpu;
+	u32 eax, edx;
+	int err;
+	unsigned long curr_temp_off = 0;
+
+	*temp = 0;
+
+	for_each_online_cpu(cpu) {
+		err = rdmsr_safe_on_cpu(cpu, MSR_IA32_THERM_STATUS, &eax,
+					&edx);
+		if (err)
+			goto err_ret;
+		else {
+			if (eax & 0x80000000) {
+				curr_temp_off = (eax >> 16) & 0x7f;
+				if (!*temp || curr_temp_off < *temp)
+					*temp = curr_temp_off;
+			} else {
+				err = -EINVAL;
+				goto err_ret;
+			}
+		}
+	}
+
+	return 0;
+err_ret:
+	return err;
+}
+
+static int proc_thermal_get_zone_temp(struct thermal_zone_device *zone,
+				      unsigned long *temp)
+{
+	int ret;
+
+	ret = read_temp_msr(temp);
+	if (!ret)
+		*temp = (stored_tjmax - *temp) * 1000;
+
+	return ret;
+}
+
+static struct thermal_zone_device_ops proc_thermal_local_ops = {
+	.get_temp	= proc_thermal_get_zone_temp,
+};
+
 static int proc_thermal_add(struct device *dev,
 			    struct proc_thermal_device **priv)
 {
@@ -126,6 +195,8 @@ static int proc_thermal_add(struct device *dev,
 	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *elements, *ppcc;
 	union acpi_object *p;
+	unsigned long long tmp;
+	struct thermal_zone_device_ops *ops = NULL;
 	int i;
 	int ret;
 
@@ -178,6 +249,24 @@ static int proc_thermal_add(struct device *dev,
 
 	ret = sysfs_create_group(&dev->kobj,
 				 &power_limit_attribute_group);
+	if (ret)
+		goto free_buffer;
+
+	status = acpi_evaluate_integer(adev->handle, "_TMP", NULL, &tmp);
+	if (ACPI_FAILURE(status)) {
+		/* there is no _TMP method, add local method */
+		stored_tjmax = get_tjmax();
+		if (stored_tjmax > 0)
+			ops = &proc_thermal_local_ops;
+	}
+
+	proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops);
+	if (IS_ERR(proc_priv->int340x_zone)) {
+		sysfs_remove_group(&proc_priv->dev->kobj,
+				   &power_limit_attribute_group);
+		ret = PTR_ERR(proc_priv->int340x_zone);
+	} else
+		ret = 0;
 
 free_buffer:
 	kfree(buf.pointer);
@@ -185,8 +274,9 @@ free_buffer:
 	return ret;
 }
 
-void proc_thermal_remove(struct proc_thermal_device *proc_priv)
+static void proc_thermal_remove(struct proc_thermal_device *proc_priv)
 {
+	int340x_thermal_zone_remove(proc_priv->int340x_zone);
 	sysfs_remove_group(&proc_priv->dev->kobj,
 			   &power_limit_attribute_group);
 }
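The MSR fallback above reports temperature relative to TjMax: as the code reads the registers, IA32_TEMPERATURE_TARGET bits 23:16 give TjMax, and IA32_THERM_STATUS bit 31 validates a digital readout in bits 22:16 that counts degrees below TjMax, so read_temp_msr() keeps the smallest readout across CPUs, i.e. the hottest core. In millidegrees:

	/* Worked example with hypothetical register values:
	 *   TjMax   = 100  (IA32_TEMPERATURE_TARGET, bits 23:16)
	 *   readout =  40  (IA32_THERM_STATUS, bits 22:16)
	 *   temp    = (100 - 40) * 1000 = 60000 m°C = 60 °C
	 */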
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 6ceebd659dd4..12623bc02f46 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -688,6 +688,7 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
 	{ X86_VENDOR_INTEL, 6, 0x45},
 	{ X86_VENDOR_INTEL, 6, 0x46},
 	{ X86_VENDOR_INTEL, 6, 0x4c},
+	{ X86_VENDOR_INTEL, 6, 0x4d},
 	{ X86_VENDOR_INTEL, 6, 0x56},
 	{}
 };
diff --git a/drivers/thermal/intel_soc_dts_thermal.c b/drivers/thermal/intel_soc_dts_thermal.c
index 5580f5b24eb9..9013505e43b7 100644
--- a/drivers/thermal/intel_soc_dts_thermal.c
+++ b/drivers/thermal/intel_soc_dts_thermal.c
@@ -309,10 +309,13 @@ static int soc_dts_enable(int id)
 	return ret;
 }
 
-static struct soc_sensor_entry *alloc_soc_dts(int id, u32 tj_max)
+static struct soc_sensor_entry *alloc_soc_dts(int id, u32 tj_max,
+					      bool notification_support)
 {
 	struct soc_sensor_entry *aux_entry;
 	char name[10];
+	int trip_count = 0;
+	int trip_mask = 0;
 	int err;
 
 	aux_entry = kzalloc(sizeof(*aux_entry), GFP_KERNEL);
@@ -332,11 +335,16 @@ static struct soc_sensor_entry *alloc_soc_dts(int id, u32 tj_max)
 	aux_entry->tj_max = tj_max;
 	aux_entry->temp_mask = 0x00FF << (id * 8);
 	aux_entry->temp_shift = id * 8;
+	if (notification_support) {
+		trip_count = SOC_MAX_DTS_TRIPS;
+		trip_mask = 0x02;
+	}
 	snprintf(name, sizeof(name), "soc_dts%d", id);
-	aux_entry->tzone = thermal_zone_device_register(name,
-			SOC_MAX_DTS_TRIPS,
-			0x02,
-			aux_entry, &tzone_ops, NULL, 0, 0);
+	aux_entry->tzone = thermal_zone_device_register(name,
+							trip_count,
+							trip_mask,
+							aux_entry, &tzone_ops,
+							NULL, 0, 0);
 	if (IS_ERR(aux_entry->tzone)) {
 		err = PTR_ERR(aux_entry->tzone);
 		goto err_ret;
@@ -402,6 +410,7 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
 
 static const struct x86_cpu_id soc_thermal_ids[] = {
 	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x37, 0, BYT_SOC_DTS_APIC_IRQ},
+	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x4c, 0, 0},
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids);
@@ -420,8 +429,11 @@ static int __init intel_soc_thermal_init(void)
 	if (get_tj_max(&tj_max))
 		return -EINVAL;
 
+	soc_dts_thres_irq = (int)match_cpu->driver_data;
+
 	for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
-		soc_dts[i] = alloc_soc_dts(i, tj_max);
+		soc_dts[i] = alloc_soc_dts(i, tj_max,
+					   soc_dts_thres_irq ? true : false);
 		if (IS_ERR(soc_dts[i])) {
 			err = PTR_ERR(soc_dts[i]);
 			goto err_free;
@@ -430,15 +442,15 @@ static int __init intel_soc_thermal_init(void)
 
 	spin_lock_init(&intr_notify_lock);
 
-	soc_dts_thres_irq = (int)match_cpu->driver_data;
-
-	err = request_threaded_irq(soc_dts_thres_irq, NULL,
-				   soc_irq_thread_fn,
-				   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
-				   "soc_dts", soc_dts);
-	if (err) {
-		pr_err("request_threaded_irq ret %d\n", err);
-		goto err_free;
+	if (soc_dts_thres_irq) {
+		err = request_threaded_irq(soc_dts_thres_irq, NULL,
+					   soc_irq_thread_fn,
+					   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+					   "soc_dts", soc_dts);
+		if (err) {
+			pr_err("request_threaded_irq ret %d\n", err);
+			goto err_free;
+		}
 	}
 
 	for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
@@ -451,7 +463,8 @@ static int __init intel_soc_thermal_init(void)
 
 err_trip_temp:
 	i = SOC_MAX_DTS_SENSORS;
-	free_irq(soc_dts_thres_irq, soc_dts);
+	if (soc_dts_thres_irq)
+		free_irq(soc_dts_thres_irq, soc_dts);
 err_free:
 	while (--i >= 0)
 		free_soc_dts(soc_dts[i]);
@@ -466,7 +479,8 @@ static void __exit intel_soc_thermal_exit(void)
 	for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i)
 		update_trip_temp(soc_dts[i], 0, 0);
 
-	free_irq(soc_dts_thres_irq, soc_dts);
+	if (soc_dts_thres_irq)
+		free_irq(soc_dts_thres_irq, soc_dts);
 
 	for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i)
 		free_soc_dts(soc_dts[i]);
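Hoisting the driver_data fetch ahead of the allocation loop lets intel_soc_thermal_init() decide up front whether threshold notifications exist; id-table entries with a zero driver_data (such as the new 0x4c one) now get polling-only zones and skip IRQ setup entirely. Presumably the lookup earlier in the function follows the usual pattern:

	/* Sketch of the assumed x86_cpu_id lookup; driver_data carries
	 * the threshold IRQ number, or 0 when the SoC has none. */
	const struct x86_cpu_id *match_cpu;

	match_cpu = x86_match_cpu(soc_thermal_ids);
	if (!match_cpu)
		return -ENODEV;
	soc_dts_thres_irq = (int)match_cpu->driver_data;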
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index d717f3dab6f1..668fb1bdea9e 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -497,6 +497,9 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
 		if (sensor_specs.np == sensor_np && id == sensor_id) {
 			tzd = thermal_zone_of_add_sensor(child, sensor_np,
 							 data, ops);
+			if (!IS_ERR(tzd))
+				tzd->ops->set_mode(tzd, THERMAL_DEVICE_ENABLED);
+
 			of_node_put(sensor_specs.np);
 			of_node_put(child);
 			goto exit;
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 2580a4872f90..fe4e767018c4 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -387,21 +387,9 @@ static int rcar_thermal_probe(struct platform_device *pdev)
 
 	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (irq) {
-		int ret;
-
 		/*
 		 * platform has IRQ support.
 		 * Then, driver uses common registers
-		 */
-
-		ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
-				       dev_name(dev), common);
-		if (ret) {
-			dev_err(dev, "irq request failed\n ");
-			return ret;
-		}
-
-		/*
 		 * rcar_has_irq_support() will be enabled
 		 */
 		res = platform_get_resource(pdev, IORESOURCE_MEM, mres++);
@@ -456,8 +444,16 @@ static int rcar_thermal_probe(struct platform_device *pdev)
 	}
 
 	/* enable temperature comparison */
-	if (irq)
+	if (irq) {
+		ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
+				       dev_name(dev), common);
+		if (ret) {
+			dev_err(dev, "irq request failed\n ");
+			goto error_unregister;
+		}
+
 		rcar_thermal_common_write(common, ENR, enr_bits);
+	}
 
 	platform_set_drvdata(pdev, common);
 
@@ -467,9 +463,9 @@ static int rcar_thermal_probe(struct platform_device *pdev)
 
 error_unregister:
 	rcar_thermal_for_each_priv(priv, common) {
-		thermal_zone_device_unregister(priv->zone);
 		if (rcar_has_irq_support(priv))
 			rcar_thermal_irq_disable(priv);
+		thermal_zone_device_unregister(priv->zone);
 	}
 
 	pm_runtime_put(dev);
@@ -485,9 +481,9 @@ static int rcar_thermal_remove(struct platform_device *pdev)
 	struct rcar_thermal_priv *priv;
 
 	rcar_thermal_for_each_priv(priv, common) {
-		thermal_zone_device_unregister(priv->zone);
 		if (rcar_has_irq_support(priv))
 			rcar_thermal_irq_disable(priv);
+		thermal_zone_device_unregister(priv->zone);
 	}
 
 	pm_runtime_put(dev);
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 9c6ce548e363..3aa46ac7cdbc 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -193,19 +193,20 @@ static u32 rk_tsadcv2_temp_to_code(long temp)
 
 static long rk_tsadcv2_code_to_temp(u32 code)
 {
-	int high, low, mid;
-
-	low = 0;
-	high = ARRAY_SIZE(v2_code_table) - 1;
-	mid = (high + low) / 2;
+	unsigned int low = 0;
+	unsigned int high = ARRAY_SIZE(v2_code_table) - 1;
+	unsigned int mid = (low + high) / 2;
+	unsigned int num;
+	unsigned long denom;
 
-	if (code > v2_code_table[low].code || code < v2_code_table[high].code)
-		return 125000; /* No code available, return max temperature */
+	/* Invalid code, return -EAGAIN */
+	if (code > TSADCV2_DATA_MASK)
+		return -EAGAIN;
 
-	while (low <= high) {
-		if (code >= v2_code_table[mid].code && code <
-		    v2_code_table[mid - 1].code)
-			return v2_code_table[mid].temp;
+	while (low <= high && mid) {
+		if (code >= v2_code_table[mid].code &&
+		    code < v2_code_table[mid - 1].code)
+			break;
 		else if (code < v2_code_table[mid].code)
 			low = mid + 1;
 		else
@@ -213,7 +214,16 @@ static long rk_tsadcv2_code_to_temp(u32 code)
 		mid = (low + high) / 2;
 	}
 
-	return 125000;
+	/*
+	 * The 5C granularity provided by the table is too much. Let's
+	 * assume that the relationship between sensor readings and
+	 * temperature between 2 table entries is linear and interpolate
+	 * to produce less granular result.
+	 */
+	num = v2_code_table[mid].temp - v2_code_table[mid - 1].temp;
+	num *= v2_code_table[mid - 1].code - code;
+	denom = v2_code_table[mid - 1].code - v2_code_table[mid].code;
+	return v2_code_table[mid - 1].temp + (num / denom);
 }
 
 /**
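Instead of returning the raw table entry (5 °C steps) or a bogus 125000 on failure, the lookup above now breaks out of the binary search and interpolates linearly between the two neighbouring entries; codes decrease as temperature rises. A worked example with hypothetical table values:

	/* Assume v2_code_table[mid - 1] = { .code = 3500, .temp = 25000 }
	 *        v2_code_table[mid]     = { .code = 3400, .temp = 30000 }
	 * and a reading of code == 3450:
	 *   num   = (30000 - 25000) * (3500 - 3450) = 250000
	 *   denom =  3500 - 3400                    = 100
	 *   temp  = 25000 + 250000 / 100            = 27500 m°C (27.5 °C)
	 */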
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index c43306ecc0ab..c8e35c1a43dc 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -7,12 +7,3 @@ config EXYNOS_THERMAL
 	  the TMU, reports temperature and handles cooling action if defined.
 	  This driver uses the Exynos core thermal APIs and TMU configuration
 	  data from the supported SoCs.
-
-config EXYNOS_THERMAL_CORE
-	bool "Core thermal framework support for EXYNOS SOCs"
-	depends on EXYNOS_THERMAL
-	help
-	  If you say yes here you get support for EXYNOS TMU
-	  (Thermal Management Unit) common registration/unregistration
-	  functions to the core thermal layer and also to use the generic
-	  CPU cooling APIs.
diff --git a/drivers/thermal/samsung/Makefile b/drivers/thermal/samsung/Makefile
index c09d83095dc2..1e47d0d89ce0 100644
--- a/drivers/thermal/samsung/Makefile
+++ b/drivers/thermal/samsung/Makefile
@@ -3,5 +3,3 @@
 #
 obj-$(CONFIG_EXYNOS_THERMAL)	+= exynos_thermal.o
 exynos_thermal-y		:= exynos_tmu.o
-exynos_thermal-y		+= exynos_tmu_data.o
-exynos_thermal-$(CONFIG_EXYNOS_THERMAL_CORE)	+= exynos_thermal_common.o
diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c
deleted file mode 100644
index 6dc3815cc73f..000000000000
--- a/drivers/thermal/samsung/exynos_thermal_common.c
+++ /dev/null
@@ -1,427 +0,0 @@
-/*
- * exynos_thermal_common.c - Samsung EXYNOS common thermal file
- *
- * Copyright (C) 2013 Samsung Electronics
- * Amit Daniel Kachhap <amit.daniel@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- */
-
-#include <linux/cpu_cooling.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/thermal.h>
-
-#include "exynos_thermal_common.h"
-
-struct exynos_thermal_zone {
-	enum thermal_device_mode mode;
-	struct thermal_zone_device *therm_dev;
-	struct thermal_cooling_device *cool_dev[MAX_COOLING_DEVICE];
-	unsigned int cool_dev_size;
-	struct platform_device *exynos4_dev;
-	struct thermal_sensor_conf *sensor_conf;
-	bool bind;
-};
-
-/* Get mode callback functions for thermal zone */
-static int exynos_get_mode(struct thermal_zone_device *thermal,
-			   enum thermal_device_mode *mode)
-{
-	struct exynos_thermal_zone *th_zone = thermal->devdata;
-	if (th_zone)
-		*mode = th_zone->mode;
-	return 0;
-}
-
-/* Set mode callback functions for thermal zone */
-static int exynos_set_mode(struct thermal_zone_device *thermal,
-			   enum thermal_device_mode mode)
-{
-	struct exynos_thermal_zone *th_zone = thermal->devdata;
-	if (!th_zone) {
-		dev_err(&thermal->device,
-			"thermal zone not registered\n");
-		return 0;
-	}
-
-	mutex_lock(&thermal->lock);
-
-	if (mode == THERMAL_DEVICE_ENABLED &&
-		!th_zone->sensor_conf->trip_data.trigger_falling)
-		thermal->polling_delay = IDLE_INTERVAL;
-	else
-		thermal->polling_delay = 0;
-
-	mutex_unlock(&thermal->lock);
-
-	th_zone->mode = mode;
-	thermal_zone_device_update(thermal);
-	dev_dbg(th_zone->sensor_conf->dev,
-		"thermal polling set for duration=%d msec\n",
-		thermal->polling_delay);
-	return 0;
-}
-
-
-/* Get trip type callback functions for thermal zone */
-static int exynos_get_trip_type(struct thermal_zone_device *thermal, int trip,
-				enum thermal_trip_type *type)
-{
-	struct exynos_thermal_zone *th_zone = thermal->devdata;
-	int max_trip = th_zone->sensor_conf->trip_data.trip_count;
-	int trip_type;
-
-	if (trip < 0 || trip >= max_trip)
-		return -EINVAL;
-
-	trip_type = th_zone->sensor_conf->trip_data.trip_type[trip];
-
-	if (trip_type == SW_TRIP)
-		*type = THERMAL_TRIP_CRITICAL;
-	else if (trip_type == THROTTLE_ACTIVE)
-		*type = THERMAL_TRIP_ACTIVE;
-	else if (trip_type == THROTTLE_PASSIVE)
-		*type = THERMAL_TRIP_PASSIVE;
-	else
-		return -EINVAL;
-
-	return 0;
-}
-
-/* Get trip temperature callback functions for thermal zone */
-static int exynos_get_trip_temp(struct thermal_zone_device *thermal, int trip,
-				unsigned long *temp)
-{
-	struct exynos_thermal_zone *th_zone = thermal->devdata;
-	int max_trip = th_zone->sensor_conf->trip_data.trip_count;
-
-	if (trip < 0 || trip >= max_trip)
-		return -EINVAL;
-
-	*temp = th_zone->sensor_conf->trip_data.trip_val[trip];
-	/* convert the temperature into millicelsius */
-	*temp = *temp * MCELSIUS;
-
-	return 0;
-}
-
-/* Get critical temperature callback functions for thermal zone */
-static int exynos_get_crit_temp(struct thermal_zone_device *thermal,
-				unsigned long *temp)
-{
-	struct exynos_thermal_zone *th_zone = thermal->devdata;
-	int max_trip = th_zone->sensor_conf->trip_data.trip_count;
-	/* Get the temp of highest trip*/
-	return exynos_get_trip_temp(thermal, max_trip - 1, temp);
-}
-
-/* Bind callback functions for thermal zone */
-static int exynos_bind(struct thermal_zone_device *thermal,
-		       struct thermal_cooling_device *cdev)
-{
-	int ret = 0, i, tab_size, level;
-	struct freq_clip_table *tab_ptr, *clip_data;
-	struct exynos_thermal_zone *th_zone = thermal->devdata;
-	struct thermal_sensor_conf *data = th_zone->sensor_conf;
-
-	tab_ptr = (struct freq_clip_table *)data->cooling_data.freq_data;
-	tab_size = data->cooling_data.freq_clip_count;
-
-	if (tab_ptr == NULL || tab_size == 0)
-		return 0;
-
-	/* find the cooling device registered*/
-	for (i = 0; i < th_zone->cool_dev_size; i++)
-		if (cdev == th_zone->cool_dev[i])
-			break;
-
-	/* No matching cooling device */
-	if (i == th_zone->cool_dev_size)
-		return 0;
-
-	/* Bind the thermal zone to the cpufreq cooling device */
-	for (i = 0; i < tab_size; i++) {
-		clip_data = (struct freq_clip_table *)&(tab_ptr[i]);
-		level = cpufreq_cooling_get_level(0, clip_data->freq_clip_max);
-		if (level == THERMAL_CSTATE_INVALID)
-			return 0;
-		switch (GET_ZONE(i)) {
-		case MONITOR_ZONE:
-		case WARN_ZONE:
-			if (thermal_zone_bind_cooling_device(thermal, i, cdev,
-								level, 0)) {
-				dev_err(data->dev,
-					"error unbinding cdev inst=%d\n", i);
-				ret = -EINVAL;
-			}
-			th_zone->bind = true;
-			break;
-		default:
-			ret = -EINVAL;
-		}
-	}
-
-	return ret;
-}
-
-/* Unbind callback functions for thermal zone */
-static int exynos_unbind(struct thermal_zone_device *thermal,
-			 struct thermal_cooling_device *cdev)
-{
-	int ret = 0, i, tab_size;
-	struct exynos_thermal_zone *th_zone = thermal->devdata;
-	struct thermal_sensor_conf *data = th_zone->sensor_conf;
-
-	if (th_zone->bind == false)
-		return 0;
-
-	tab_size = data->cooling_data.freq_clip_count;
-
-	if (tab_size == 0)
-		return 0;
-
-	/* find the cooling device registered*/
-	for (i = 0; i < th_zone->cool_dev_size; i++)
-		if (cdev == th_zone->cool_dev[i])
-			break;
-
-	/* No matching cooling device */
-	if (i == th_zone->cool_dev_size)
-		return 0;
-
-	/* Bind the thermal zone to the cpufreq cooling device */
-	for (i = 0; i < tab_size; i++) {
-		switch (GET_ZONE(i)) {
-		case MONITOR_ZONE:
-		case WARN_ZONE:
-			if (thermal_zone_unbind_cooling_device(thermal, i,
-								cdev)) {
-				dev_err(data->dev,
-					"error unbinding cdev inst=%d\n", i);
-				ret = -EINVAL;
-			}
-			th_zone->bind = false;
-			break;
-		default:
-			ret = -EINVAL;
-		}
-	}
-	return ret;
-}
-
-/* Get temperature callback functions for thermal zone */
-static int exynos_get_temp(struct thermal_zone_device *thermal,
-			   unsigned long *temp)
-{
-	struct exynos_thermal_zone *th_zone = thermal->devdata;
-	void *data;
-
-	if (!th_zone->sensor_conf) {
-		dev_err(&thermal->device,
-			"Temperature sensor not initialised\n");
-		return -EINVAL;
-	}
-	data = th_zone->sensor_conf->driver_data;
-	*temp = th_zone->sensor_conf->read_temperature(data);
-	/* convert the temperature into millicelsius */
-	*temp = *temp * MCELSIUS;
-	return 0;
-}
-
-/* Get temperature callback functions for thermal zone */
-static int exynos_set_emul_temp(struct thermal_zone_device *thermal,
-				unsigned long temp)
-{
-	void *data;
-	int ret = -EINVAL;
-	struct exynos_thermal_zone *th_zone = thermal->devdata;
-
-	if (!th_zone->sensor_conf) {
-		dev_err(&thermal->device,
-			"Temperature sensor not initialised\n");
-		return -EINVAL;
-	}
-	data = th_zone->sensor_conf->driver_data;
-	if (th_zone->sensor_conf->write_emul_temp)
-		ret = th_zone->sensor_conf->write_emul_temp(data, temp);
-	return ret;
-}
-
-/* Get the temperature trend */
-static int exynos_get_trend(struct thermal_zone_device *thermal,
-			    int trip, enum thermal_trend *trend)
-{
-	int ret;
-	unsigned long trip_temp;
-
-	ret = exynos_get_trip_temp(thermal, trip, &trip_temp);
-	if (ret < 0)
-		return ret;
-
-	if (thermal->temperature >= trip_temp)
-		*trend = THERMAL_TREND_RAISE_FULL;
-	else
-		*trend = THERMAL_TREND_DROP_FULL;
-
-	return 0;
-}
-/* Operation callback functions for thermal zone */
-static struct thermal_zone_device_ops exynos_dev_ops = {
-	.bind = exynos_bind,
-	.unbind = exynos_unbind,
-	.get_temp = exynos_get_temp,
-	.set_emul_temp = exynos_set_emul_temp,
-	.get_trend = exynos_get_trend,
-	.get_mode = exynos_get_mode,
-	.set_mode = exynos_set_mode,
-	.get_trip_type = exynos_get_trip_type,
-	.get_trip_temp = exynos_get_trip_temp,
-	.get_crit_temp = exynos_get_crit_temp,
-};
-
-/*
- * This function may be called from interrupt based temperature sensor
- * when threshold is changed.
- */
-void exynos_report_trigger(struct thermal_sensor_conf *conf)
-{
-	unsigned int i;
-	char data[10];
-	char *envp[] = { data, NULL };
-	struct exynos_thermal_zone *th_zone;
-
-	if (!conf || !conf->pzone_data) {
-		pr_err("Invalid temperature sensor configuration data\n");
-		return;
-	}
-
-	th_zone = conf->pzone_data;
-
-	if (th_zone->bind == false) {
-		for (i = 0; i < th_zone->cool_dev_size; i++) {
-			if (!th_zone->cool_dev[i])
-				continue;
-			exynos_bind(th_zone->therm_dev,
-					th_zone->cool_dev[i]);
-		}
-	}
-
-	thermal_zone_device_update(th_zone->therm_dev);
-
-	mutex_lock(&th_zone->therm_dev->lock);
-	/* Find the level for which trip happened */
-	for (i = 0; i < th_zone->sensor_conf->trip_data.trip_count; i++) {
-		if (th_zone->therm_dev->last_temperature <
-			th_zone->sensor_conf->trip_data.trip_val[i] * MCELSIUS)
-			break;
-	}
-
-	if (th_zone->mode == THERMAL_DEVICE_ENABLED &&
-		!th_zone->sensor_conf->trip_data.trigger_falling) {
-		if (i > 0)
-			th_zone->therm_dev->polling_delay = ACTIVE_INTERVAL;
-		else
-			th_zone->therm_dev->polling_delay = IDLE_INTERVAL;
-	}
-
-	snprintf(data, sizeof(data), "%u", i);
-	kobject_uevent_env(&th_zone->therm_dev->device.kobj, KOBJ_CHANGE, envp);
-	mutex_unlock(&th_zone->therm_dev->lock);
-}
-
-/* Register with the in-kernel thermal management */
-int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
-{
-	int ret;
-	struct exynos_thermal_zone *th_zone;
-
-	if (!sensor_conf || !sensor_conf->read_temperature) {
-		pr_err("Temperature sensor not initialised\n");
-		return -EINVAL;
-	}
-
-	th_zone = devm_kzalloc(sensor_conf->dev,
-				sizeof(struct exynos_thermal_zone), GFP_KERNEL);
-	if (!th_zone)
-		return -ENOMEM;
-
-	th_zone->sensor_conf = sensor_conf;
-	/*
-	 * TODO: 1) Handle multiple cooling devices in a thermal zone
-	 *	 2) Add a flag/name in cooling info to map to specific
-	 *	 sensor
-	 */
-	if (sensor_conf->cooling_data.freq_clip_count > 0) {
-		th_zone->cool_dev[th_zone->cool_dev_size] =
-				cpufreq_cooling_register(cpu_present_mask);
-		if (IS_ERR(th_zone->cool_dev[th_zone->cool_dev_size])) {
-			ret = PTR_ERR(th_zone->cool_dev[th_zone->cool_dev_size]);
-			if (ret != -EPROBE_DEFER)
-				dev_err(sensor_conf->dev,
-					"Failed to register cpufreq cooling device: %d\n",
-					ret);
-			goto err_unregister;
-		}
-		th_zone->cool_dev_size++;
-	}
-
-	th_zone->therm_dev = thermal_zone_device_register(
-			sensor_conf->name, sensor_conf->trip_data.trip_count,
-			0, th_zone, &exynos_dev_ops, NULL, 0,
-			sensor_conf->trip_data.trigger_falling ? 0 :
-			IDLE_INTERVAL);
-
-	if (IS_ERR(th_zone->therm_dev)) {
-		dev_err(sensor_conf->dev,
-			"Failed to register thermal zone device\n");
-		ret = PTR_ERR(th_zone->therm_dev);
-		goto err_unregister;
-	}
-	th_zone->mode = THERMAL_DEVICE_ENABLED;
-	sensor_conf->pzone_data = th_zone;
-
-	dev_info(sensor_conf->dev,
-		 "Exynos: Thermal zone(%s) registered\n", sensor_conf->name);
-
-	return 0;
-
-err_unregister:
-	exynos_unregister_thermal(sensor_conf);
-	return ret;
-}
-
-/* Un-Register with the in-kernel thermal management */
-void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf)
-{
-	int i;
-	struct exynos_thermal_zone *th_zone;
-
-	if (!sensor_conf || !sensor_conf->pzone_data) {
-		pr_err("Invalid temperature sensor configuration data\n");
-		return;
-	}
-
-	th_zone = sensor_conf->pzone_data;
-
-	thermal_zone_device_unregister(th_zone->therm_dev);
-
-	for (i = 0; i < th_zone->cool_dev_size; ++i)
-		cpufreq_cooling_unregister(th_zone->cool_dev[i]);
-
-	dev_info(sensor_conf->dev,
-		 "Exynos: Kernel Thermal management unregistered\n");
-}
diff --git a/drivers/thermal/samsung/exynos_thermal_common.h b/drivers/thermal/samsung/exynos_thermal_common.h
deleted file mode 100644
index cd4471925cdd..000000000000
--- a/drivers/thermal/samsung/exynos_thermal_common.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * exynos_thermal_common.h - Samsung EXYNOS common header file
- *
- * Copyright (C) 2013 Samsung Electronics
- * Amit Daniel Kachhap <amit.daniel@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- */
-
-#ifndef _EXYNOS_THERMAL_COMMON_H
-#define _EXYNOS_THERMAL_COMMON_H
-
-/* In-kernel thermal framework related macros & definations */
-#define SENSOR_NAME_LEN	16
-#define MAX_TRIP_COUNT	8
-#define MAX_COOLING_DEVICE 4
-
-#define ACTIVE_INTERVAL 500
-#define IDLE_INTERVAL 10000
-#define MCELSIUS	1000
-
-/* CPU Zone information */
-#define PANIC_ZONE      4
-#define WARN_ZONE       3
-#define MONITOR_ZONE    2
-#define SAFE_ZONE       1
-
-#define GET_ZONE(trip) (trip + 2)
-#define GET_TRIP(zone) (zone - 2)
-
-enum trigger_type {
-	THROTTLE_ACTIVE = 1,
-	THROTTLE_PASSIVE,
-	SW_TRIP,
-	HW_TRIP,
-};
-
-/**
- * struct freq_clip_table
- * @freq_clip_max: maximum frequency allowed for this cooling state.
- * @temp_level: Temperature level at which the temperature clipping will
- *	happen.
- * @mask_val: cpumask of the allowed cpu's where the clipping will take place.
- *
- * This structure is required to be filled and passed to the
- * cpufreq_cooling_unregister function.
- */
-struct freq_clip_table {
-	unsigned int freq_clip_max;
-	unsigned int temp_level;
-	const struct cpumask *mask_val;
-};
-
-struct thermal_trip_point_conf {
-	int trip_val[MAX_TRIP_COUNT];
-	int trip_type[MAX_TRIP_COUNT];
-	int trip_count;
-	unsigned char trigger_falling;
-};
-
-struct thermal_cooling_conf {
-	struct freq_clip_table freq_data[MAX_TRIP_COUNT];
-	int freq_clip_count;
-};
-
-struct thermal_sensor_conf {
-	char name[SENSOR_NAME_LEN];
-	int (*read_temperature)(void *data);
-	int (*write_emul_temp)(void *drv_data, unsigned long temp);
-	struct thermal_trip_point_conf trip_data;
-	struct thermal_cooling_conf cooling_data;
-	void *driver_data;
-	void *pzone_data;
-	struct device *dev;
-};
-
-/*Functions used exynos based thermal sensor driver*/
-#ifdef CONFIG_EXYNOS_THERMAL_CORE
-void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf);
-int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf);
-void exynos_report_trigger(struct thermal_sensor_conf *sensor_conf);
-#else
-static inline void
-exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf) { return; }
-
-static inline int
-exynos_register_thermal(struct thermal_sensor_conf *sensor_conf) { return 0; }
-
-static inline void
-exynos_report_trigger(struct thermal_sensor_conf *sensor_conf) { return; }
-
-#endif /* CONFIG_EXYNOS_THERMAL_CORE */
-#endif /* _EXYNOS_THERMAL_COMMON_H */
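For reference, the zone/trip macros in the deleted header encoded a fixed offset of two between a trip index and its zone id:

	/* GET_ZONE(trip) == trip + 2 and GET_TRIP(zone) == zone - 2:
	 * trip 0 -> MONITOR_ZONE (2), trip 1 -> WARN_ZONE (3),
	 * trip 2 -> PANIC_ZONE (4); SAFE_ZONE (1) had no trip. */

The exynos_bind()/exynos_unbind() switch statements in the deleted .c file only throttled in MONITOR_ZONE and WARN_ZONE, i.e. trips 0 and 1.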
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index d2f1e62a4232..1fc54ab911d2 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -1,6 +1,10 @@
1/* 1/*
2 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit) 2 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
3 * 3 *
4 * Copyright (C) 2014 Samsung Electronics
5 * Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
6 * Lukasz Majewski <l.majewski@samsung.com>
7 *
4 * Copyright (C) 2011 Samsung Electronics 8 * Copyright (C) 2011 Samsung Electronics
5 * Donggeun Kim <dg77.kim@samsung.com> 9 * Donggeun Kim <dg77.kim@samsung.com>
6 * Amit Daniel Kachhap <amit.kachhap@linaro.org> 10 * Amit Daniel Kachhap <amit.kachhap@linaro.org>
@@ -31,8 +35,8 @@
31#include <linux/platform_device.h> 35#include <linux/platform_device.h>
32#include <linux/regulator/consumer.h> 36#include <linux/regulator/consumer.h>
33 37
34#include "exynos_thermal_common.h"
35#include "exynos_tmu.h" 38#include "exynos_tmu.h"
39#include "../thermal_core.h"
36 40
37/* Exynos generic registers */ 41/* Exynos generic registers */
38#define EXYNOS_TMU_REG_TRIMINFO 0x0 42#define EXYNOS_TMU_REG_TRIMINFO 0x0
@@ -115,6 +119,27 @@
115#define EXYNOS5440_TMU_TH_RISE4_SHIFT 24 119#define EXYNOS5440_TMU_TH_RISE4_SHIFT 24
116#define EXYNOS5440_EFUSE_SWAP_OFFSET 8 120#define EXYNOS5440_EFUSE_SWAP_OFFSET 8
117 121
122/* Exynos7 specific registers */
123#define EXYNOS7_THD_TEMP_RISE7_6 0x50
124#define EXYNOS7_THD_TEMP_FALL7_6 0x60
125#define EXYNOS7_TMU_REG_INTEN 0x110
126#define EXYNOS7_TMU_REG_INTPEND 0x118
127#define EXYNOS7_TMU_REG_EMUL_CON 0x160
128
129#define EXYNOS7_TMU_TEMP_MASK 0x1ff
130#define EXYNOS7_PD_DET_EN_SHIFT 23
131#define EXYNOS7_TMU_INTEN_RISE0_SHIFT 0
132#define EXYNOS7_TMU_INTEN_RISE1_SHIFT 1
133#define EXYNOS7_TMU_INTEN_RISE2_SHIFT 2
134#define EXYNOS7_TMU_INTEN_RISE3_SHIFT 3
135#define EXYNOS7_TMU_INTEN_RISE4_SHIFT 4
136#define EXYNOS7_TMU_INTEN_RISE5_SHIFT 5
137#define EXYNOS7_TMU_INTEN_RISE6_SHIFT 6
138#define EXYNOS7_TMU_INTEN_RISE7_SHIFT 7
139#define EXYNOS7_EMUL_DATA_SHIFT 7
140#define EXYNOS7_EMUL_DATA_MASK 0x1ff
141
142#define MCELSIUS 1000
118/** 143/**
119 * struct exynos_tmu_data : A structure to hold the private data of the TMU 144 * struct exynos_tmu_data : A structure to hold the private data of the TMU
120 driver 145 driver
@@ -128,6 +153,7 @@
128 * @lock: lock to implement synchronization. 153 * @lock: lock to implement synchronization.
129 * @clk: pointer to the clock structure. 154 * @clk: pointer to the clock structure.
130 * @clk_sec: pointer to the clock structure for accessing the base_second. 155 * @clk_sec: pointer to the clock structure for accessing the base_second.
156 * @sclk: pointer to the clock structure for accessing the tmu special clk.
131 * @temp_error1: fused value of the first point trim. 157 * @temp_error1: fused value of the first point trim.
132 * @temp_error2: fused value of the second point trim. 158 * @temp_error2: fused value of the second point trim.
133 * @regulator: pointer to the TMU regulator structure. 159 * @regulator: pointer to the TMU regulator structure.
@@ -147,10 +173,11 @@ struct exynos_tmu_data {
147 enum soc_type soc; 173 enum soc_type soc;
148 struct work_struct irq_work; 174 struct work_struct irq_work;
149 struct mutex lock; 175 struct mutex lock;
150 struct clk *clk, *clk_sec; 176 struct clk *clk, *clk_sec, *sclk;
151 u8 temp_error1, temp_error2; 177 u16 temp_error1, temp_error2;
152 struct regulator *regulator; 178 struct regulator *regulator;
153 struct thermal_sensor_conf *reg_conf; 179 struct thermal_zone_device *tzd;
180
154 int (*tmu_initialize)(struct platform_device *pdev); 181 int (*tmu_initialize)(struct platform_device *pdev);
155 void (*tmu_control)(struct platform_device *pdev, bool on); 182 void (*tmu_control)(struct platform_device *pdev, bool on);
156 int (*tmu_read)(struct exynos_tmu_data *data); 183 int (*tmu_read)(struct exynos_tmu_data *data);
@@ -159,6 +186,33 @@ struct exynos_tmu_data {
159 void (*tmu_clear_irqs)(struct exynos_tmu_data *data); 186 void (*tmu_clear_irqs)(struct exynos_tmu_data *data);
160}; 187};
161 188
189static void exynos_report_trigger(struct exynos_tmu_data *p)
190{
191 char data[10], *envp[] = { data, NULL };
192 struct thermal_zone_device *tz = p->tzd;
193 unsigned long temp;
194 unsigned int i;
195
196 if (!tz) {
197 pr_err("No thermal zone device defined\n");
198 return;
199 }
200
201 thermal_zone_device_update(tz);
202
203 mutex_lock(&tz->lock);
204 /* Find the level for which trip happened */
205 for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
206 tz->ops->get_trip_temp(tz, i, &temp);
207 if (tz->last_temperature < temp)
208 break;
209 }
210
211 snprintf(data, sizeof(data), "%u", i);
212 kobject_uevent_env(&tz->device.kobj, KOBJ_CHANGE, envp);
213 mutex_unlock(&tz->lock);
214}
215
162/* 216/*
163 * TMU treats temperature as a mapped temperature code. 217 * TMU treats temperature as a mapped temperature code.
164 * The temperature is converted differently depending on the calibration type. 218 * The temperature is converted differently depending on the calibration type.
@@ -190,7 +244,7 @@ static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
190 * Calculate a temperature value from a temperature code. 244 * Calculate a temperature value from a temperature code.
191 * The unit of the temperature is degree Celsius. 245 * The unit of the temperature is degree Celsius.
192 */ 246 */
193static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code) 247static int code_to_temp(struct exynos_tmu_data *data, u16 temp_code)
194{ 248{
195 struct exynos_tmu_platform_data *pdata = data->pdata; 249 struct exynos_tmu_platform_data *pdata = data->pdata;
196 int temp; 250 int temp;
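Both conversion helpers are linear interpolation between the two fused trim points. A minimal standalone sketch of the two-point-trimming case, with illustrative constants (the real values come from the TRIMINFO fuses and pdata; the one-point case degenerates to a fixed offset):

/* Sketch: two-point trimming, illustrative constants only. */
static int sketch_temp_to_code(int temp)
{
	const int first_trim = 25, second_trim = 85;	/* degC, from pdata */
	const int error1 = 55, error2 = 115;		/* fused trim codes */

	return (temp - first_trim) * (error2 - error1) /
	       (second_trim - first_trim) + error1;
}

/* Sketch: the exact inverse mapping, code back to degrees Celsius. */
static int sketch_code_to_temp(int code)
{
	const int first_trim = 25, second_trim = 85;
	const int error1 = 55, error2 = 115;

	return (code - error1) * (second_trim - first_trim) /
	       (error2 - error1) + first_trim;
}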
@@ -234,14 +288,25 @@ static void sanitize_temp_error(struct exynos_tmu_data *data, u32 trim_info)
234 288
235static u32 get_th_reg(struct exynos_tmu_data *data, u32 threshold, bool falling) 289static u32 get_th_reg(struct exynos_tmu_data *data, u32 threshold, bool falling)
236{ 290{
237 struct exynos_tmu_platform_data *pdata = data->pdata; 291 struct thermal_zone_device *tz = data->tzd;
292 const struct thermal_trip * const trips =
293 of_thermal_get_trip_points(tz);
294 unsigned long temp;
238 int i; 295 int i;
239 296
240 for (i = 0; i < pdata->non_hw_trigger_levels; i++) { 297 if (!trips) {
241 u8 temp = pdata->trigger_levels[i]; 298 pr_err("%s: Cannot get trip points from of-thermal.c!\n",
299 __func__);
300 return 0;
301 }
302
303 for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
304 if (trips[i].type == THERMAL_TRIP_CRITICAL)
305 continue;
242 306
307 temp = trips[i].temperature / MCELSIUS;
243 if (falling) 308 if (falling)
244 temp -= pdata->threshold_falling; 309 temp -= (trips[i].hysteresis / MCELSIUS);
245 else 310 else
246 threshold &= ~(0xff << 8 * i); 311 threshold &= ~(0xff << 8 * i);
247 312
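The loop goes on to OR the converted code back in after the clear, one byte per trip level. A sketch of that packing step, with the 8-bit code already computed:

/* Sketch: insert an 8-bit code for trip level i into a threshold word. */
static unsigned int pack_threshold(unsigned int threshold, int i,
				   unsigned int code)
{
	threshold &= ~(0xffu << (8 * i));	/* clear byte i */
	threshold |= (code & 0xffu) << (8 * i);	/* insert the new code */
	return threshold;
}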
@@ -305,9 +370,19 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
305static int exynos4210_tmu_initialize(struct platform_device *pdev) 370static int exynos4210_tmu_initialize(struct platform_device *pdev)
306{ 371{
307 struct exynos_tmu_data *data = platform_get_drvdata(pdev); 372 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
308 struct exynos_tmu_platform_data *pdata = data->pdata; 373 struct thermal_zone_device *tz = data->tzd;
309 unsigned int status; 374 const struct thermal_trip * const trips =
375 of_thermal_get_trip_points(tz);
310 int ret = 0, threshold_code, i; 376 int ret = 0, threshold_code, i;
377 unsigned long reference, temp;
378 unsigned int status;
379
380 if (!trips) {
381 pr_err("%s: Cannot get trip points from of-thermal.c!\n",
382 __func__);
383 ret = -ENODEV;
384 goto out;
385 }
311 386
312 status = readb(data->base + EXYNOS_TMU_REG_STATUS); 387 status = readb(data->base + EXYNOS_TMU_REG_STATUS);
313 if (!status) { 388 if (!status) {
@@ -318,12 +393,19 @@ static int exynos4210_tmu_initialize(struct platform_device *pdev)
318 sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO)); 393 sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO));
319 394
320 /* Write temperature code for threshold */ 395 /* Write temperature code for threshold */
321 threshold_code = temp_to_code(data, pdata->threshold); 396 reference = trips[0].temperature / MCELSIUS;
397 threshold_code = temp_to_code(data, reference);
398 if (threshold_code < 0) {
399 ret = threshold_code;
400 goto out;
401 }
322 writeb(threshold_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP); 402 writeb(threshold_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
323 403
324 for (i = 0; i < pdata->non_hw_trigger_levels; i++) 404 for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
325 writeb(pdata->trigger_levels[i], data->base + 405 temp = trips[i].temperature / MCELSIUS;
406 writeb(temp - reference, data->base +
326 EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4); 407 EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
408 }
327 409
328 data->tmu_clear_irqs(data); 410 data->tmu_clear_irqs(data);
329out: 411out:
@@ -333,9 +415,11 @@ out:
333static int exynos4412_tmu_initialize(struct platform_device *pdev) 415static int exynos4412_tmu_initialize(struct platform_device *pdev)
334{ 416{
335 struct exynos_tmu_data *data = platform_get_drvdata(pdev); 417 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
336 struct exynos_tmu_platform_data *pdata = data->pdata; 418 const struct thermal_trip * const trips =
419 of_thermal_get_trip_points(data->tzd);
337 unsigned int status, trim_info, con, ctrl, rising_threshold; 420 unsigned int status, trim_info, con, ctrl, rising_threshold;
338 int ret = 0, threshold_code, i; 421 int ret = 0, threshold_code, i;
422 unsigned long crit_temp = 0;
339 423
340 status = readb(data->base + EXYNOS_TMU_REG_STATUS); 424 status = readb(data->base + EXYNOS_TMU_REG_STATUS);
341 if (!status) { 425 if (!status) {
@@ -373,17 +457,29 @@ static int exynos4412_tmu_initialize(struct platform_device *pdev)
373 data->tmu_clear_irqs(data); 457 data->tmu_clear_irqs(data);
374 458
375 /* if last threshold limit is also present */ 459 /* if last threshold limit is also present */
376 i = pdata->max_trigger_level - 1; 460 for (i = 0; i < of_thermal_get_ntrips(data->tzd); i++) {
377 if (pdata->trigger_levels[i] && pdata->trigger_type[i] == HW_TRIP) { 461 if (trips[i].type == THERMAL_TRIP_CRITICAL) {
378 threshold_code = temp_to_code(data, pdata->trigger_levels[i]); 462 crit_temp = trips[i].temperature;
379 /* 1-4 level to be assigned in th0 reg */ 463 break;
380 rising_threshold &= ~(0xff << 8 * i); 464 }
381 rising_threshold |= threshold_code << 8 * i; 465 }
382 writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE); 466
383 con = readl(data->base + EXYNOS_TMU_REG_CONTROL); 467 if (i == of_thermal_get_ntrips(data->tzd)) {
384 con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT); 468 pr_err("%s: No CRITICAL trip point defined at of-thermal.c!\n",
385 writel(con, data->base + EXYNOS_TMU_REG_CONTROL); 469 __func__);
470 ret = -EINVAL;
471 goto out;
386 } 472 }
473
474 threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
475 /* 1-4 level to be assigned in th0 reg */
476 rising_threshold &= ~(0xff << 8 * i);
477 rising_threshold |= threshold_code << 8 * i;
478 writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
479 con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
480 con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
481 writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
482
387out: 483out:
388 return ret; 484 return ret;
389} 485}
@@ -391,9 +487,9 @@ out:
391static int exynos5440_tmu_initialize(struct platform_device *pdev) 487static int exynos5440_tmu_initialize(struct platform_device *pdev)
392{ 488{
393 struct exynos_tmu_data *data = platform_get_drvdata(pdev); 489 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
394 struct exynos_tmu_platform_data *pdata = data->pdata;
395 unsigned int trim_info = 0, con, rising_threshold; 490 unsigned int trim_info = 0, con, rising_threshold;
396 int ret = 0, threshold_code, i; 491 int ret = 0, threshold_code;
492 unsigned long crit_temp = 0;
397 493
398 /* 494 /*
399 * For exynos5440 soc triminfo value is swapped between TMU0 and 495 * For exynos5440 soc triminfo value is swapped between TMU0 and
@@ -422,9 +518,8 @@ static int exynos5440_tmu_initialize(struct platform_device *pdev)
422 data->tmu_clear_irqs(data); 518 data->tmu_clear_irqs(data);
423 519
424 /* if last threshold limit is also present */ 520 /* if last threshold limit is also present */
425 i = pdata->max_trigger_level - 1; 521 if (!data->tzd->ops->get_crit_temp(data->tzd, &crit_temp)) {
426 if (pdata->trigger_levels[i] && pdata->trigger_type[i] == HW_TRIP) { 522 threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
427 threshold_code = temp_to_code(data, pdata->trigger_levels[i]);
428 /* 5th level to be assigned in th2 reg */ 523 /* 5th level to be assigned in th2 reg */
429 rising_threshold = 524 rising_threshold =
430 threshold_code << EXYNOS5440_TMU_TH_RISE4_SHIFT; 525 threshold_code << EXYNOS5440_TMU_TH_RISE4_SHIFT;
@@ -439,10 +534,88 @@ static int exynos5440_tmu_initialize(struct platform_device *pdev)
439 return ret; 534 return ret;
440} 535}
441 536
442static void exynos4210_tmu_control(struct platform_device *pdev, bool on) 537static int exynos7_tmu_initialize(struct platform_device *pdev)
443{ 538{
444 struct exynos_tmu_data *data = platform_get_drvdata(pdev); 539 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
540 struct thermal_zone_device *tz = data->tzd;
445 struct exynos_tmu_platform_data *pdata = data->pdata; 541 struct exynos_tmu_platform_data *pdata = data->pdata;
542 unsigned int status, trim_info;
543 unsigned int rising_threshold = 0, falling_threshold = 0;
544 int ret = 0, threshold_code, i;
545 unsigned long temp, temp_hist;
546 unsigned int reg_off, bit_off;
547
548 status = readb(data->base + EXYNOS_TMU_REG_STATUS);
549 if (!status) {
550 ret = -EBUSY;
551 goto out;
552 }
553
554 trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
555
556 data->temp_error1 = trim_info & EXYNOS7_TMU_TEMP_MASK;
557 if (!data->temp_error1 ||
558 (pdata->min_efuse_value > data->temp_error1) ||
559 (data->temp_error1 > pdata->max_efuse_value))
560 data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;
561
562 /* Write temperature code for rising and falling threshold */
563 for (i = (of_thermal_get_ntrips(tz) - 1); i >= 0; i--) {
564 /*
565 * On exynos7 there are 4 rising and 4 falling threshold
566 * registers (0x50-0x5c and 0x60-0x6c respectively). Each
567 * register holds the value of two threshold levels (at bit
568 * offsets 0 and 16). Based on the fact that there are at most
569 * eight possible trigger levels, calculate the register and
570 * bit offsets where the threshold levels are to be written.
571 *
572 * e.g. EXYNOS7_THD_TEMP_RISE7_6 (0x50)
573 * [24:16] - Threshold level 7
574 * [8:0] - Threshold level 6
575 * e.g. EXYNOS7_THD_TEMP_RISE5_4 (0x54)
576 * [24:16] - Threshold level 5
577 * [8:0] - Threshold level 4
578 *
579 * and similarly for falling thresholds.
580 *
581 * Based on the above, calculate the register and bit offsets
582 * for rising/falling threshold levels and populate them.
583 */
584 reg_off = ((7 - i) / 2) * 4;
585 bit_off = ((8 - i) % 2);
586
587 tz->ops->get_trip_temp(tz, i, &temp);
588 temp /= MCELSIUS;
589
590 tz->ops->get_trip_hyst(tz, i, &temp_hist);
591 temp_hist = temp - (temp_hist / MCELSIUS);
592
593 /* Set 9-bit temperature code for rising threshold levels */
594 threshold_code = temp_to_code(data, temp);
595 rising_threshold = readl(data->base +
596 EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
597 rising_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
598 rising_threshold |= threshold_code << (16 * bit_off);
599 writel(rising_threshold,
600 data->base + EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
601
602 /* Set 9-bit temperature code for falling threshold levels */
603 threshold_code = temp_to_code(data, temp_hist);
604 falling_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
605 falling_threshold |= threshold_code << (16 * bit_off);
606 writel(falling_threshold,
607 data->base + EXYNOS7_THD_TEMP_FALL7_6 + reg_off);
608 }
609
610 data->tmu_clear_irqs(data);
611out:
612 return ret;
613}
614
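Working the comment's arithmetic through all eight indices makes the register map concrete; this small userspace sketch prints the trip-index to register/field assignments (0x50 stands in for EXYNOS7_THD_TEMP_RISE7_6, and the falling registers at 0x60 follow the same pattern):

#include <stdio.h>

int main(void)
{
	int i;

	for (i = 7; i >= 0; i--) {
		unsigned int reg_off = ((7 - i) / 2) * 4;
		unsigned int bit_off = (8 - i) % 2;

		/* 9-bit field at [24:16] when bit_off is 1, [8:0] when 0 */
		printf("trip %d -> reg 0x%02x, bits [%u:%u]\n",
		       i, 0x50 + reg_off,
		       16 * bit_off + 8, 16 * bit_off);
	}
	return 0;
}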
615static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
616{
617 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
618 struct thermal_zone_device *tz = data->tzd;
446 unsigned int con, interrupt_en; 619 unsigned int con, interrupt_en;
447 620
448 con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL)); 621 con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
@@ -450,10 +623,15 @@ static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
450 if (on) { 623 if (on) {
451 con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT); 624 con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
452 interrupt_en = 625 interrupt_en =
453 pdata->trigger_enable[3] << EXYNOS_TMU_INTEN_RISE3_SHIFT | 626 (of_thermal_is_trip_valid(tz, 3)
454 pdata->trigger_enable[2] << EXYNOS_TMU_INTEN_RISE2_SHIFT | 627 << EXYNOS_TMU_INTEN_RISE3_SHIFT) |
455 pdata->trigger_enable[1] << EXYNOS_TMU_INTEN_RISE1_SHIFT | 628 (of_thermal_is_trip_valid(tz, 2)
456 pdata->trigger_enable[0] << EXYNOS_TMU_INTEN_RISE0_SHIFT; 629 << EXYNOS_TMU_INTEN_RISE2_SHIFT) |
630 (of_thermal_is_trip_valid(tz, 1)
631 << EXYNOS_TMU_INTEN_RISE1_SHIFT) |
632 (of_thermal_is_trip_valid(tz, 0)
633 << EXYNOS_TMU_INTEN_RISE0_SHIFT);
634
457 if (data->soc != SOC_ARCH_EXYNOS4210) 635 if (data->soc != SOC_ARCH_EXYNOS4210)
458 interrupt_en |= 636 interrupt_en |=
459 interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT; 637 interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
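The four explicit of_thermal_is_trip_valid() terms follow directly from the RISEn shifts being 0..3, so the same mask can be built in a loop; a sketch, assuming the driver's context (tz and the of-thermal helper as used above):

/* Sketch: rising-interrupt enable mask from the valid trip points. */
static unsigned int sketch_build_inten(struct thermal_zone_device *tz)
{
	unsigned int mask = 0;
	int i;

	for (i = 0; i < 4; i++)
		if (of_thermal_is_trip_valid(tz, i))
			mask |= 1u << i;	/* EXYNOS_TMU_INTEN_RISEn_SHIFT == n */
	return mask;
}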
@@ -468,7 +646,7 @@ static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
468static void exynos5440_tmu_control(struct platform_device *pdev, bool on) 646static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
469{ 647{
470 struct exynos_tmu_data *data = platform_get_drvdata(pdev); 648 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
471 struct exynos_tmu_platform_data *pdata = data->pdata; 649 struct thermal_zone_device *tz = data->tzd;
472 unsigned int con, interrupt_en; 650 unsigned int con, interrupt_en;
473 651
474 con = get_con_reg(data, readl(data->base + EXYNOS5440_TMU_S0_7_CTRL)); 652 con = get_con_reg(data, readl(data->base + EXYNOS5440_TMU_S0_7_CTRL));
@@ -476,11 +654,16 @@ static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
476 if (on) { 654 if (on) {
477 con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT); 655 con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
478 interrupt_en = 656 interrupt_en =
479 pdata->trigger_enable[3] << EXYNOS5440_TMU_INTEN_RISE3_SHIFT | 657 (of_thermal_is_trip_valid(tz, 3)
480 pdata->trigger_enable[2] << EXYNOS5440_TMU_INTEN_RISE2_SHIFT | 658 << EXYNOS5440_TMU_INTEN_RISE3_SHIFT) |
481 pdata->trigger_enable[1] << EXYNOS5440_TMU_INTEN_RISE1_SHIFT | 659 (of_thermal_is_trip_valid(tz, 2)
482 pdata->trigger_enable[0] << EXYNOS5440_TMU_INTEN_RISE0_SHIFT; 660 << EXYNOS5440_TMU_INTEN_RISE2_SHIFT) |
483 interrupt_en |= interrupt_en << EXYNOS5440_TMU_INTEN_FALL0_SHIFT; 661 (of_thermal_is_trip_valid(tz, 1)
662 << EXYNOS5440_TMU_INTEN_RISE1_SHIFT) |
663 (of_thermal_is_trip_valid(tz, 0)
664 << EXYNOS5440_TMU_INTEN_RISE0_SHIFT);
665 interrupt_en |=
666 interrupt_en << EXYNOS5440_TMU_INTEN_FALL0_SHIFT;
484 } else { 667 } else {
485 con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT); 668 con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
486 interrupt_en = 0; /* Disable all interrupts */ 669 interrupt_en = 0; /* Disable all interrupts */
@@ -489,19 +672,62 @@ static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
489 writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL); 672 writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
490} 673}
491 674
492static int exynos_tmu_read(struct exynos_tmu_data *data) 675static void exynos7_tmu_control(struct platform_device *pdev, bool on)
493{ 676{
494 int ret; 677 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
678 struct thermal_zone_device *tz = data->tzd;
679 unsigned int con, interrupt_en;
680
681 con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
682
683 if (on) {
684 con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
685 interrupt_en =
686 (of_thermal_is_trip_valid(tz, 7)
687 << EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
688 (of_thermal_is_trip_valid(tz, 6)
689 << EXYNOS7_TMU_INTEN_RISE6_SHIFT) |
690 (of_thermal_is_trip_valid(tz, 5)
691 << EXYNOS7_TMU_INTEN_RISE5_SHIFT) |
692 (of_thermal_is_trip_valid(tz, 4)
693 << EXYNOS7_TMU_INTEN_RISE4_SHIFT) |
694 (of_thermal_is_trip_valid(tz, 3)
695 << EXYNOS7_TMU_INTEN_RISE3_SHIFT) |
696 (of_thermal_is_trip_valid(tz, 2)
697 << EXYNOS7_TMU_INTEN_RISE2_SHIFT) |
698 (of_thermal_is_trip_valid(tz, 1)
699 << EXYNOS7_TMU_INTEN_RISE1_SHIFT) |
700 (of_thermal_is_trip_valid(tz, 0)
701 << EXYNOS7_TMU_INTEN_RISE0_SHIFT);
702
703 interrupt_en |=
704 interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
705 } else {
706 con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
707 interrupt_en = 0; /* Disable all interrupts */
708 }
709 con |= 1 << EXYNOS7_PD_DET_EN_SHIFT;
710
711 writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN);
712 writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
713}
714
715static int exynos_get_temp(void *p, long *temp)
716{
717 struct exynos_tmu_data *data = p;
718
719 if (!data || !data->tmu_read)
720 return -EINVAL;
495 721
496 mutex_lock(&data->lock); 722 mutex_lock(&data->lock);
497 clk_enable(data->clk); 723 clk_enable(data->clk);
498 ret = data->tmu_read(data); 724
499 if (ret >= 0) 725 *temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS;
500 ret = code_to_temp(data, ret); 726
501 clk_disable(data->clk); 727 clk_disable(data->clk);
502 mutex_unlock(&data->lock); 728 mutex_unlock(&data->lock);
503 729
504 return ret; 730 return 0;
505} 731}
506 732
507#ifdef CONFIG_THERMAL_EMULATION 733#ifdef CONFIG_THERMAL_EMULATION
@@ -515,9 +741,19 @@ static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val,
515 val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT); 741 val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT);
516 val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT); 742 val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT);
517 } 743 }
518 val &= ~(EXYNOS_EMUL_DATA_MASK << EXYNOS_EMUL_DATA_SHIFT); 744 if (data->soc == SOC_ARCH_EXYNOS7) {
519 val |= (temp_to_code(data, temp) << EXYNOS_EMUL_DATA_SHIFT) | 745 val &= ~(EXYNOS7_EMUL_DATA_MASK <<
520 EXYNOS_EMUL_ENABLE; 746 EXYNOS7_EMUL_DATA_SHIFT);
747 val |= (temp_to_code(data, temp) <<
748 EXYNOS7_EMUL_DATA_SHIFT) |
749 EXYNOS_EMUL_ENABLE;
750 } else {
751 val &= ~(EXYNOS_EMUL_DATA_MASK <<
752 EXYNOS_EMUL_DATA_SHIFT);
753 val |= (temp_to_code(data, temp) <<
754 EXYNOS_EMUL_DATA_SHIFT) |
755 EXYNOS_EMUL_ENABLE;
756 }
521 } else { 757 } else {
522 val &= ~EXYNOS_EMUL_ENABLE; 758 val &= ~EXYNOS_EMUL_ENABLE;
523 } 759 }
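The two branches differ only in field width and position: exynos7 keeps a 9-bit code at bit offset 7 (per the defines above), while the other SoCs use the narrower EXYNOS_EMUL_DATA field. A sketch of the exynos7 insert, assuming EXYNOS_EMUL_ENABLE is bit 0 as in the driver's defines:

/* Sketch: place a 9-bit emulated temperature code into EMUL_CON. */
static u32 sketch_emul_set(u32 val, u32 code)
{
	val &= ~(0x1ffu << 7);		/* EXYNOS7_EMUL_DATA_{MASK,SHIFT} */
	val |= (code & 0x1ffu) << 7;
	return val | 0x1;		/* EXYNOS_EMUL_ENABLE */
}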
@@ -533,6 +769,8 @@ static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
533 769
534 if (data->soc == SOC_ARCH_EXYNOS5260) 770 if (data->soc == SOC_ARCH_EXYNOS5260)
535 emul_con = EXYNOS5260_EMUL_CON; 771 emul_con = EXYNOS5260_EMUL_CON;
772 else if (data->soc == SOC_ARCH_EXYNOS7)
773 emul_con = EXYNOS7_TMU_REG_EMUL_CON;
536 else 774 else
537 emul_con = EXYNOS_EMUL_CON; 775 emul_con = EXYNOS_EMUL_CON;
538 776
@@ -576,7 +814,7 @@ out:
576#define exynos5440_tmu_set_emulation NULL 814#define exynos5440_tmu_set_emulation NULL
577static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp) 815static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
578 { return -EINVAL; } 816 { return -EINVAL; }
579#endif/*CONFIG_THERMAL_EMULATION*/ 817#endif /* CONFIG_THERMAL_EMULATION */
580 818
581static int exynos4210_tmu_read(struct exynos_tmu_data *data) 819static int exynos4210_tmu_read(struct exynos_tmu_data *data)
582{ 820{
@@ -596,6 +834,12 @@ static int exynos5440_tmu_read(struct exynos_tmu_data *data)
596 return readb(data->base + EXYNOS5440_TMU_S0_7_TEMP); 834 return readb(data->base + EXYNOS5440_TMU_S0_7_TEMP);
597} 835}
598 836
837static int exynos7_tmu_read(struct exynos_tmu_data *data)
838{
839 return readw(data->base + EXYNOS_TMU_REG_CURRENT_TEMP) &
840 EXYNOS7_TMU_TEMP_MASK;
841}
842
599static void exynos_tmu_work(struct work_struct *work) 843static void exynos_tmu_work(struct work_struct *work)
600{ 844{
601 struct exynos_tmu_data *data = container_of(work, 845 struct exynos_tmu_data *data = container_of(work,
@@ -613,7 +857,7 @@ static void exynos_tmu_work(struct work_struct *work)
613 if (!IS_ERR(data->clk_sec)) 857 if (!IS_ERR(data->clk_sec))
614 clk_disable(data->clk_sec); 858 clk_disable(data->clk_sec);
615 859
616 exynos_report_trigger(data->reg_conf); 860 exynos_report_trigger(data);
617 mutex_lock(&data->lock); 861 mutex_lock(&data->lock);
618 clk_enable(data->clk); 862 clk_enable(data->clk);
619 863
@@ -634,6 +878,9 @@ static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data)
634 if (data->soc == SOC_ARCH_EXYNOS5260) { 878 if (data->soc == SOC_ARCH_EXYNOS5260) {
635 tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT; 879 tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT;
636 tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR; 880 tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR;
881 } else if (data->soc == SOC_ARCH_EXYNOS7) {
882 tmu_intstat = EXYNOS7_TMU_REG_INTPEND;
883 tmu_intclear = EXYNOS7_TMU_REG_INTPEND;
637 } else { 884 } else {
638 tmu_intstat = EXYNOS_TMU_REG_INTSTAT; 885 tmu_intstat = EXYNOS_TMU_REG_INTSTAT;
639 tmu_intclear = EXYNOS_TMU_REG_INTCLEAR; 886 tmu_intclear = EXYNOS_TMU_REG_INTCLEAR;
@@ -671,57 +918,78 @@ static irqreturn_t exynos_tmu_irq(int irq, void *id)
671} 918}
672 919
673static const struct of_device_id exynos_tmu_match[] = { 920static const struct of_device_id exynos_tmu_match[] = {
674 { 921 { .compatible = "samsung,exynos3250-tmu", },
675 .compatible = "samsung,exynos3250-tmu", 922 { .compatible = "samsung,exynos4210-tmu", },
676 .data = &exynos3250_default_tmu_data, 923 { .compatible = "samsung,exynos4412-tmu", },
677 }, 924 { .compatible = "samsung,exynos5250-tmu", },
678 { 925 { .compatible = "samsung,exynos5260-tmu", },
679 .compatible = "samsung,exynos4210-tmu", 926 { .compatible = "samsung,exynos5420-tmu", },
680 .data = &exynos4210_default_tmu_data, 927 { .compatible = "samsung,exynos5420-tmu-ext-triminfo", },
681 }, 928 { .compatible = "samsung,exynos5440-tmu", },
682 { 929 { .compatible = "samsung,exynos7-tmu", },
683 .compatible = "samsung,exynos4412-tmu", 930 { /* sentinel */ },
684 .data = &exynos4412_default_tmu_data,
685 },
686 {
687 .compatible = "samsung,exynos5250-tmu",
688 .data = &exynos5250_default_tmu_data,
689 },
690 {
691 .compatible = "samsung,exynos5260-tmu",
692 .data = &exynos5260_default_tmu_data,
693 },
694 {
695 .compatible = "samsung,exynos5420-tmu",
696 .data = &exynos5420_default_tmu_data,
697 },
698 {
699 .compatible = "samsung,exynos5420-tmu-ext-triminfo",
700 .data = &exynos5420_default_tmu_data,
701 },
702 {
703 .compatible = "samsung,exynos5440-tmu",
704 .data = &exynos5440_default_tmu_data,
705 },
706 {},
707}; 931};
708MODULE_DEVICE_TABLE(of, exynos_tmu_match); 932MODULE_DEVICE_TABLE(of, exynos_tmu_match);
709 933
710static inline struct exynos_tmu_platform_data *exynos_get_driver_data( 934static int exynos_of_get_soc_type(struct device_node *np)
711 struct platform_device *pdev, int id) 935{
936 if (of_device_is_compatible(np, "samsung,exynos3250-tmu"))
937 return SOC_ARCH_EXYNOS3250;
938 else if (of_device_is_compatible(np, "samsung,exynos4210-tmu"))
939 return SOC_ARCH_EXYNOS4210;
940 else if (of_device_is_compatible(np, "samsung,exynos4412-tmu"))
941 return SOC_ARCH_EXYNOS4412;
942 else if (of_device_is_compatible(np, "samsung,exynos5250-tmu"))
943 return SOC_ARCH_EXYNOS5250;
944 else if (of_device_is_compatible(np, "samsung,exynos5260-tmu"))
945 return SOC_ARCH_EXYNOS5260;
946 else if (of_device_is_compatible(np, "samsung,exynos5420-tmu"))
947 return SOC_ARCH_EXYNOS5420;
948 else if (of_device_is_compatible(np,
949 "samsung,exynos5420-tmu-ext-triminfo"))
950 return SOC_ARCH_EXYNOS5420_TRIMINFO;
951 else if (of_device_is_compatible(np, "samsung,exynos5440-tmu"))
952 return SOC_ARCH_EXYNOS5440;
953 else if (of_device_is_compatible(np, "samsung,exynos7-tmu"))
954 return SOC_ARCH_EXYNOS7;
955
956 return -EINVAL;
957}
958
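The if/else chain trades the old match-table .data pointers for explicit compatible checks; the same lookup could stay table-driven. A sketch of that alternative shape (not what this patch does, just the equivalent structure, with most entries elided):

/* Sketch: table-driven soc-type lookup, alternative to the if chain. */
static const struct { const char *compat; int soc; } sketch_soc_map[] = {
	{ "samsung,exynos3250-tmu", SOC_ARCH_EXYNOS3250 },
	{ "samsung,exynos7-tmu", SOC_ARCH_EXYNOS7 },
	/* ... remaining compatibles as in exynos_of_get_soc_type() ... */
};

static int sketch_get_soc_type(struct device_node *np)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sketch_soc_map); i++)
		if (of_device_is_compatible(np, sketch_soc_map[i].compat))
			return sketch_soc_map[i].soc;
	return -EINVAL;
}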
959static int exynos_of_sensor_conf(struct device_node *np,
960 struct exynos_tmu_platform_data *pdata)
712{ 961{
713 struct exynos_tmu_init_data *data_table; 962 u32 value;
714 struct exynos_tmu_platform_data *tmu_data; 963 int ret;
715 const struct of_device_id *match;
716 964
717 match = of_match_node(exynos_tmu_match, pdev->dev.of_node); 965 of_node_get(np);
718 if (!match) 966
719 return NULL; 967 ret = of_property_read_u32(np, "samsung,tmu_gain", &value);
720 data_table = (struct exynos_tmu_init_data *) match->data; 968 pdata->gain = (u8)value;
721 if (!data_table || id >= data_table->tmu_count) 969 of_property_read_u32(np, "samsung,tmu_reference_voltage", &value);
722 return NULL; 970 pdata->reference_voltage = (u8)value;
723 tmu_data = data_table->tmu_data; 971 of_property_read_u32(np, "samsung,tmu_noise_cancel_mode", &value);
724 return (struct exynos_tmu_platform_data *) (tmu_data + id); 972 pdata->noise_cancel_mode = (u8)value;
973
974 of_property_read_u32(np, "samsung,tmu_efuse_value",
975 &pdata->efuse_value);
976 of_property_read_u32(np, "samsung,tmu_min_efuse_value",
977 &pdata->min_efuse_value);
978 of_property_read_u32(np, "samsung,tmu_max_efuse_value",
979 &pdata->max_efuse_value);
980
981 of_property_read_u32(np, "samsung,tmu_first_point_trim", &value);
982 pdata->first_point_trim = (u8)value;
983 of_property_read_u32(np, "samsung,tmu_second_point_trim", &value);
984 pdata->second_point_trim = (u8)value;
985 of_property_read_u32(np, "samsung,tmu_default_temp_offset", &value);
986 pdata->default_temp_offset = (u8)value;
987
988 of_property_read_u32(np, "samsung,tmu_cal_type", &pdata->cal_type);
989 of_property_read_u32(np, "samsung,tmu_cal_mode", &pdata->cal_mode);
990
991 of_node_put(np);
992 return 0;
725} 993}
726 994
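The u32-to-u8 reads above ignore of_property_read_u32()'s return value, so a missing property leaves whatever was last in the scratch variable. A defensive variant with an explicit default would look like this sketch (of_read_u8_default is a hypothetical helper, not part of the patch):

/* Sketch: read a u8 DT property, falling back to a default. */
static u8 of_read_u8_default(struct device_node *np, const char *prop,
			     u8 def)
{
	u32 value;

	if (of_property_read_u32(np, prop, &value))
		return def;
	return (u8)value;
}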
727static int exynos_map_dt_data(struct platform_device *pdev) 995static int exynos_map_dt_data(struct platform_device *pdev)
@@ -771,14 +1039,15 @@ static int exynos_map_dt_data(struct platform_device *pdev)
771 return -EADDRNOTAVAIL; 1039 return -EADDRNOTAVAIL;
772 } 1040 }
773 1041
774 pdata = exynos_get_driver_data(pdev, data->id); 1042 pdata = devm_kzalloc(&pdev->dev,
775 if (!pdata) { 1043 sizeof(struct exynos_tmu_platform_data),
776 dev_err(&pdev->dev, "No platform init data supplied.\n"); 1044 GFP_KERNEL);
777 return -ENODEV; 1045 if (!pdata)
778 } 1046 return -ENOMEM;
779 1047
1048 exynos_of_sensor_conf(pdev->dev.of_node, pdata);
780 data->pdata = pdata; 1049 data->pdata = pdata;
781 data->soc = pdata->type; 1050 data->soc = exynos_of_get_soc_type(pdev->dev.of_node);
782 1051
783 switch (data->soc) { 1052 switch (data->soc) {
784 case SOC_ARCH_EXYNOS4210: 1053 case SOC_ARCH_EXYNOS4210:
@@ -806,6 +1075,13 @@ static int exynos_map_dt_data(struct platform_device *pdev)
806 data->tmu_set_emulation = exynos5440_tmu_set_emulation; 1075 data->tmu_set_emulation = exynos5440_tmu_set_emulation;
807 data->tmu_clear_irqs = exynos5440_tmu_clear_irqs; 1076 data->tmu_clear_irqs = exynos5440_tmu_clear_irqs;
808 break; 1077 break;
1078 case SOC_ARCH_EXYNOS7:
1079 data->tmu_initialize = exynos7_tmu_initialize;
1080 data->tmu_control = exynos7_tmu_control;
1081 data->tmu_read = exynos7_tmu_read;
1082 data->tmu_set_emulation = exynos4412_tmu_set_emulation;
1083 data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
1084 break;
809 default: 1085 default:
810 dev_err(&pdev->dev, "Platform not supported\n"); 1086 dev_err(&pdev->dev, "Platform not supported\n");
811 return -EINVAL; 1087 return -EINVAL;
@@ -834,12 +1110,16 @@ static int exynos_map_dt_data(struct platform_device *pdev)
834 return 0; 1110 return 0;
835} 1111}
836 1112
1113static struct thermal_zone_of_device_ops exynos_sensor_ops = {
1114 .get_temp = exynos_get_temp,
1115 .set_emul_temp = exynos_tmu_set_emulation,
1116};
1117
837static int exynos_tmu_probe(struct platform_device *pdev) 1118static int exynos_tmu_probe(struct platform_device *pdev)
838{ 1119{
839 struct exynos_tmu_data *data;
840 struct exynos_tmu_platform_data *pdata; 1120 struct exynos_tmu_platform_data *pdata;
841 struct thermal_sensor_conf *sensor_conf; 1121 struct exynos_tmu_data *data;
842 int ret, i; 1122 int ret;
843 1123
844 data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data), 1124 data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
845 GFP_KERNEL); 1125 GFP_KERNEL);
@@ -849,9 +1129,15 @@ static int exynos_tmu_probe(struct platform_device *pdev)
849 platform_set_drvdata(pdev, data); 1129 platform_set_drvdata(pdev, data);
850 mutex_init(&data->lock); 1130 mutex_init(&data->lock);
851 1131
1132 data->tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, data,
1133 &exynos_sensor_ops);
1134 if (IS_ERR(data->tzd)) {
1135 pr_err("thermal: tz: %p ERROR\n", data->tzd);
1136 return PTR_ERR(data->tzd);
1137 }
852 ret = exynos_map_dt_data(pdev); 1138 ret = exynos_map_dt_data(pdev);
853 if (ret) 1139 if (ret)
854 return ret; 1140 goto err_sensor;
855 1141
856 pdata = data->pdata; 1142 pdata = data->pdata;
857 1143
@@ -860,20 +1146,22 @@ static int exynos_tmu_probe(struct platform_device *pdev)
860 data->clk = devm_clk_get(&pdev->dev, "tmu_apbif"); 1146 data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
861 if (IS_ERR(data->clk)) { 1147 if (IS_ERR(data->clk)) {
862 dev_err(&pdev->dev, "Failed to get clock\n"); 1148 dev_err(&pdev->dev, "Failed to get clock\n");
863 return PTR_ERR(data->clk); 1149 ret = PTR_ERR(data->clk);
1150 goto err_sensor;
864 } 1151 }
865 1152
866 data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif"); 1153 data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
867 if (IS_ERR(data->clk_sec)) { 1154 if (IS_ERR(data->clk_sec)) {
868 if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) { 1155 if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
869 dev_err(&pdev->dev, "Failed to get triminfo clock\n"); 1156 dev_err(&pdev->dev, "Failed to get triminfo clock\n");
870 return PTR_ERR(data->clk_sec); 1157 ret = PTR_ERR(data->clk_sec);
1158 goto err_sensor;
871 } 1159 }
872 } else { 1160 } else {
873 ret = clk_prepare(data->clk_sec); 1161 ret = clk_prepare(data->clk_sec);
874 if (ret) { 1162 if (ret) {
875 dev_err(&pdev->dev, "Failed to get clock\n"); 1163 dev_err(&pdev->dev, "Failed to get clock\n");
876 return ret; 1164 goto err_sensor;
877 } 1165 }
878 } 1166 }
879 1167
@@ -883,82 +1171,57 @@ static int exynos_tmu_probe(struct platform_device *pdev)
883 goto err_clk_sec; 1171 goto err_clk_sec;
884 } 1172 }
885 1173
886 ret = exynos_tmu_initialize(pdev); 1174 if (data->soc == SOC_ARCH_EXYNOS7) {
887 if (ret) { 1175 data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk");
888 dev_err(&pdev->dev, "Failed to initialize TMU\n"); 1176 if (IS_ERR(data->sclk)) {
889 goto err_clk; 1177 dev_err(&pdev->dev, "Failed to get sclk\n");
1178 goto err_clk;
1179 } else {
1180 ret = clk_prepare_enable(data->sclk);
1181 if (ret) {
1182 dev_err(&pdev->dev, "Failed to enable sclk\n");
1183 goto err_clk;
1184 }
1185 }
890 } 1186 }
891 1187
892 exynos_tmu_control(pdev, true); 1188 ret = exynos_tmu_initialize(pdev);
893
894 /* Allocate a structure to register with the exynos core thermal */
895 sensor_conf = devm_kzalloc(&pdev->dev,
896 sizeof(struct thermal_sensor_conf), GFP_KERNEL);
897 if (!sensor_conf) {
898 ret = -ENOMEM;
899 goto err_clk;
900 }
901 sprintf(sensor_conf->name, "therm_zone%d", data->id);
902 sensor_conf->read_temperature = (int (*)(void *))exynos_tmu_read;
903 sensor_conf->write_emul_temp =
904 (int (*)(void *, unsigned long))exynos_tmu_set_emulation;
905 sensor_conf->driver_data = data;
906 sensor_conf->trip_data.trip_count = pdata->trigger_enable[0] +
907 pdata->trigger_enable[1] + pdata->trigger_enable[2]+
908 pdata->trigger_enable[3];
909
910 for (i = 0; i < sensor_conf->trip_data.trip_count; i++) {
911 sensor_conf->trip_data.trip_val[i] =
912 pdata->threshold + pdata->trigger_levels[i];
913 sensor_conf->trip_data.trip_type[i] =
914 pdata->trigger_type[i];
915 }
916
917 sensor_conf->trip_data.trigger_falling = pdata->threshold_falling;
918
919 sensor_conf->cooling_data.freq_clip_count = pdata->freq_tab_count;
920 for (i = 0; i < pdata->freq_tab_count; i++) {
921 sensor_conf->cooling_data.freq_data[i].freq_clip_max =
922 pdata->freq_tab[i].freq_clip_max;
923 sensor_conf->cooling_data.freq_data[i].temp_level =
924 pdata->freq_tab[i].temp_level;
925 }
926 sensor_conf->dev = &pdev->dev;
927 /* Register the sensor with thermal management interface */
928 ret = exynos_register_thermal(sensor_conf);
929 if (ret) { 1189 if (ret) {
930 if (ret != -EPROBE_DEFER) 1190 dev_err(&pdev->dev, "Failed to initialize TMU\n");
931 dev_err(&pdev->dev, 1191 goto err_sclk;
932 "Failed to register thermal interface: %d\n",
933 ret);
934 goto err_clk;
935 } 1192 }
936 data->reg_conf = sensor_conf;
937 1193
938 ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq, 1194 ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
939 IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data); 1195 IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
940 if (ret) { 1196 if (ret) {
941 dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq); 1197 dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
942 goto err_clk; 1198 goto err_sclk;
943 } 1199 }
944 1200
1201 exynos_tmu_control(pdev, true);
945 return 0; 1202 return 0;
1203err_sclk:
1204 clk_disable_unprepare(data->sclk);
946err_clk: 1205err_clk:
947 clk_unprepare(data->clk); 1206 clk_unprepare(data->clk);
948err_clk_sec: 1207err_clk_sec:
949 if (!IS_ERR(data->clk_sec)) 1208 if (!IS_ERR(data->clk_sec))
950 clk_unprepare(data->clk_sec); 1209 clk_unprepare(data->clk_sec);
1210err_sensor:
1211 thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
1212
951 return ret; 1213 return ret;
952} 1214}
953 1215
954static int exynos_tmu_remove(struct platform_device *pdev) 1216static int exynos_tmu_remove(struct platform_device *pdev)
955{ 1217{
956 struct exynos_tmu_data *data = platform_get_drvdata(pdev); 1218 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
1219 struct thermal_zone_device *tzd = data->tzd;
957 1220
958 exynos_unregister_thermal(data->reg_conf); 1221 thermal_zone_of_sensor_unregister(&pdev->dev, tzd);
959
960 exynos_tmu_control(pdev, false); 1222 exynos_tmu_control(pdev, false);
961 1223
1224 clk_disable_unprepare(data->sclk);
962 clk_unprepare(data->clk); 1225 clk_unprepare(data->clk);
963 if (!IS_ERR(data->clk_sec)) 1226 if (!IS_ERR(data->clk_sec))
964 clk_unprepare(data->clk_sec); 1227 clk_unprepare(data->clk_sec);
diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h
index da3009bff6c4..4d71ec6c9aa0 100644
--- a/drivers/thermal/samsung/exynos_tmu.h
+++ b/drivers/thermal/samsung/exynos_tmu.h
@@ -23,16 +23,7 @@
23#ifndef _EXYNOS_TMU_H 23#ifndef _EXYNOS_TMU_H
24#define _EXYNOS_TMU_H 24#define _EXYNOS_TMU_H
25#include <linux/cpu_cooling.h> 25#include <linux/cpu_cooling.h>
26 26#include <dt-bindings/thermal/thermal_exynos.h>
27#include "exynos_thermal_common.h"
28
29enum calibration_type {
30 TYPE_ONE_POINT_TRIMMING,
31 TYPE_ONE_POINT_TRIMMING_25,
32 TYPE_ONE_POINT_TRIMMING_85,
33 TYPE_TWO_POINT_TRIMMING,
34 TYPE_NONE,
35};
36 27
37enum soc_type { 28enum soc_type {
38 SOC_ARCH_EXYNOS3250 = 1, 29 SOC_ARCH_EXYNOS3250 = 1,
@@ -43,38 +34,11 @@ enum soc_type {
43 SOC_ARCH_EXYNOS5420, 34 SOC_ARCH_EXYNOS5420,
44 SOC_ARCH_EXYNOS5420_TRIMINFO, 35 SOC_ARCH_EXYNOS5420_TRIMINFO,
45 SOC_ARCH_EXYNOS5440, 36 SOC_ARCH_EXYNOS5440,
37 SOC_ARCH_EXYNOS7,
46}; 38};
47 39
48/** 40/**
49 * struct exynos_tmu_platform_data 41 * struct exynos_tmu_platform_data
50 * @threshold: basic temperature for generating interrupt
51 * 25 <= threshold <= 125 [unit: degree Celsius]
52 * @threshold_falling: differential value for setting threshold
53 * of temperature falling interrupt.
54 * @trigger_levels: array for each interrupt levels
55 * [unit: degree Celsius]
56 * 0: temperature for trigger_level0 interrupt
57 * condition for trigger_level0 interrupt:
58 * current temperature > threshold + trigger_levels[0]
59 * 1: temperature for trigger_level1 interrupt
60 * condition for trigger_level1 interrupt:
61 * current temperature > threshold + trigger_levels[1]
62 * 2: temperature for trigger_level2 interrupt
63 * condition for trigger_level2 interrupt:
64 * current temperature > threshold + trigger_levels[2]
65 * 3: temperature for trigger_level3 interrupt
66 * condition for trigger_level3 interrupt:
67 * current temperature > threshold + trigger_levels[3]
68 * @trigger_type: defines the type of trigger. Possible values are,
69 * THROTTLE_ACTIVE trigger type
70 * THROTTLE_PASSIVE trigger type
71 * SW_TRIP trigger type
72 * HW_TRIP
73 * @trigger_enable[]: array to denote which trigger levels are enabled.
74 * 1 = enable trigger_level[] interrupt,
75 * 0 = disable trigger_level[] interrupt
76 * @max_trigger_level: max trigger level supported by the TMU
77 * @non_hw_trigger_levels: number of defined non-hardware trigger levels
78 * @gain: gain of amplifier in the positive-TC generator block 42 * @gain: gain of amplifier in the positive-TC generator block
79 * 0 < gain <= 15 43 * 0 < gain <= 15
80 * @reference_voltage: reference voltage of amplifier 44 * @reference_voltage: reference voltage of amplifier
@@ -86,24 +50,12 @@ enum soc_type {
86 * @efuse_value: platform defined fuse value 50 * @efuse_value: platform defined fuse value
87 * @min_efuse_value: minimum valid trimming data 51 * @min_efuse_value: minimum valid trimming data
88 * @max_efuse_value: maximum valid trimming data 52 * @max_efuse_value: maximum valid trimming data
89 * @first_point_trim: temp value of the first point trimming
90 * @second_point_trim: temp value of the second point trimming
91 * @default_temp_offset: default temperature offset in case of no trimming 53 * @default_temp_offset: default temperature offset in case of no trimming
92 * @cal_type: calibration type for temperature 54 * @cal_type: calibration type for temperature
93 * @freq_clip_table: Table representing frequency reduction percentage.
94 * @freq_tab_count: Count of the above table as frequency reduction may be
95 * applicable to only some of the trigger levels.
96 * 55 *
97 * This structure is required for configuration of exynos_tmu driver. 56 * This structure is required for configuration of exynos_tmu driver.
98 */ 57 */
99struct exynos_tmu_platform_data { 58struct exynos_tmu_platform_data {
100 u8 threshold;
101 u8 threshold_falling;
102 u8 trigger_levels[MAX_TRIP_COUNT];
103 enum trigger_type trigger_type[MAX_TRIP_COUNT];
104 bool trigger_enable[MAX_TRIP_COUNT];
105 u8 max_trigger_level;
106 u8 non_hw_trigger_levels;
107 u8 gain; 59 u8 gain;
108 u8 reference_voltage; 60 u8 reference_voltage;
109 u8 noise_cancel_mode; 61 u8 noise_cancel_mode;
@@ -115,30 +67,9 @@ struct exynos_tmu_platform_data {
115 u8 second_point_trim; 67 u8 second_point_trim;
116 u8 default_temp_offset; 68 u8 default_temp_offset;
117 69
118 enum calibration_type cal_type;
119 enum soc_type type; 70 enum soc_type type;
120 struct freq_clip_table freq_tab[4]; 71 u32 cal_type;
121 unsigned int freq_tab_count; 72 u32 cal_mode;
122};
123
124/**
125 * struct exynos_tmu_init_data
126 * @tmu_count: number of TMU instances.
127 * @tmu_data: platform data of all TMU instances.
128 * This structure is required to store data for multi-instance exynos tmu
129 * driver.
130 */
131struct exynos_tmu_init_data {
132 int tmu_count;
133 struct exynos_tmu_platform_data tmu_data[];
134}; 73};
135 74
136extern struct exynos_tmu_init_data const exynos3250_default_tmu_data;
137extern struct exynos_tmu_init_data const exynos4210_default_tmu_data;
138extern struct exynos_tmu_init_data const exynos4412_default_tmu_data;
139extern struct exynos_tmu_init_data const exynos5250_default_tmu_data;
140extern struct exynos_tmu_init_data const exynos5260_default_tmu_data;
141extern struct exynos_tmu_init_data const exynos5420_default_tmu_data;
142extern struct exynos_tmu_init_data const exynos5440_default_tmu_data;
143
144#endif /* _EXYNOS_TMU_H */ 75#endif /* _EXYNOS_TMU_H */
diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c
deleted file mode 100644
index b23910069f68..000000000000
--- a/drivers/thermal/samsung/exynos_tmu_data.c
+++ /dev/null
@@ -1,264 +0,0 @@
1/*
2 * exynos_tmu_data.c - Samsung EXYNOS tmu data file
3 *
4 * Copyright (C) 2013 Samsung Electronics
5 * Amit Daniel Kachhap <amit.daniel@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include "exynos_thermal_common.h"
24#include "exynos_tmu.h"
25
26struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
27 .tmu_data = {
28 {
29 .threshold = 80,
30 .trigger_levels[0] = 5,
31 .trigger_levels[1] = 20,
32 .trigger_levels[2] = 30,
33 .trigger_enable[0] = true,
34 .trigger_enable[1] = true,
35 .trigger_enable[2] = true,
36 .trigger_enable[3] = false,
37 .trigger_type[0] = THROTTLE_ACTIVE,
38 .trigger_type[1] = THROTTLE_ACTIVE,
39 .trigger_type[2] = SW_TRIP,
40 .max_trigger_level = 4,
41 .non_hw_trigger_levels = 3,
42 .gain = 15,
43 .reference_voltage = 7,
44 .cal_type = TYPE_ONE_POINT_TRIMMING,
45 .min_efuse_value = 40,
46 .max_efuse_value = 100,
47 .first_point_trim = 25,
48 .second_point_trim = 85,
49 .default_temp_offset = 50,
50 .freq_tab[0] = {
51 .freq_clip_max = 800 * 1000,
52 .temp_level = 85,
53 },
54 .freq_tab[1] = {
55 .freq_clip_max = 200 * 1000,
56 .temp_level = 100,
57 },
58 .freq_tab_count = 2,
59 .type = SOC_ARCH_EXYNOS4210,
60 },
61 },
62 .tmu_count = 1,
63};
64
65#define EXYNOS3250_TMU_DATA \
66 .threshold_falling = 10, \
67 .trigger_levels[0] = 70, \
68 .trigger_levels[1] = 95, \
69 .trigger_levels[2] = 110, \
70 .trigger_levels[3] = 120, \
71 .trigger_enable[0] = true, \
72 .trigger_enable[1] = true, \
73 .trigger_enable[2] = true, \
74 .trigger_enable[3] = false, \
75 .trigger_type[0] = THROTTLE_ACTIVE, \
76 .trigger_type[1] = THROTTLE_ACTIVE, \
77 .trigger_type[2] = SW_TRIP, \
78 .trigger_type[3] = HW_TRIP, \
79 .max_trigger_level = 4, \
80 .non_hw_trigger_levels = 3, \
81 .gain = 8, \
82 .reference_voltage = 16, \
83 .noise_cancel_mode = 4, \
84 .cal_type = TYPE_TWO_POINT_TRIMMING, \
85 .efuse_value = 55, \
86 .min_efuse_value = 40, \
87 .max_efuse_value = 100, \
88 .first_point_trim = 25, \
89 .second_point_trim = 85, \
90 .default_temp_offset = 50, \
91 .freq_tab[0] = { \
92 .freq_clip_max = 800 * 1000, \
93 .temp_level = 70, \
94 }, \
95 .freq_tab[1] = { \
96 .freq_clip_max = 400 * 1000, \
97 .temp_level = 95, \
98 }, \
99 .freq_tab_count = 2
100
101struct exynos_tmu_init_data const exynos3250_default_tmu_data = {
102 .tmu_data = {
103 {
104 EXYNOS3250_TMU_DATA,
105 .type = SOC_ARCH_EXYNOS3250,
106 },
107 },
108 .tmu_count = 1,
109};
110
111#define EXYNOS4412_TMU_DATA \
112 .threshold_falling = 10, \
113 .trigger_levels[0] = 70, \
114 .trigger_levels[1] = 95, \
115 .trigger_levels[2] = 110, \
116 .trigger_levels[3] = 120, \
117 .trigger_enable[0] = true, \
118 .trigger_enable[1] = true, \
119 .trigger_enable[2] = true, \
120 .trigger_enable[3] = false, \
121 .trigger_type[0] = THROTTLE_ACTIVE, \
122 .trigger_type[1] = THROTTLE_ACTIVE, \
123 .trigger_type[2] = SW_TRIP, \
124 .trigger_type[3] = HW_TRIP, \
125 .max_trigger_level = 4, \
126 .non_hw_trigger_levels = 3, \
127 .gain = 8, \
128 .reference_voltage = 16, \
129 .noise_cancel_mode = 4, \
130 .cal_type = TYPE_ONE_POINT_TRIMMING, \
131 .efuse_value = 55, \
132 .min_efuse_value = 40, \
133 .max_efuse_value = 100, \
134 .first_point_trim = 25, \
135 .second_point_trim = 85, \
136 .default_temp_offset = 50, \
137 .freq_tab[0] = { \
138 .freq_clip_max = 1400 * 1000, \
139 .temp_level = 70, \
140 }, \
141 .freq_tab[1] = { \
142 .freq_clip_max = 400 * 1000, \
143 .temp_level = 95, \
144 }, \
145 .freq_tab_count = 2
146
147struct exynos_tmu_init_data const exynos4412_default_tmu_data = {
148 .tmu_data = {
149 {
150 EXYNOS4412_TMU_DATA,
151 .type = SOC_ARCH_EXYNOS4412,
152 },
153 },
154 .tmu_count = 1,
155};
156
157struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
158 .tmu_data = {
159 {
160 EXYNOS4412_TMU_DATA,
161 .type = SOC_ARCH_EXYNOS5250,
162 },
163 },
164 .tmu_count = 1,
165};
166
167#define __EXYNOS5260_TMU_DATA \
168 .threshold_falling = 10, \
169 .trigger_levels[0] = 85, \
170 .trigger_levels[1] = 103, \
171 .trigger_levels[2] = 110, \
172 .trigger_levels[3] = 120, \
173 .trigger_enable[0] = true, \
174 .trigger_enable[1] = true, \
175 .trigger_enable[2] = true, \
176 .trigger_enable[3] = false, \
177 .trigger_type[0] = THROTTLE_ACTIVE, \
178 .trigger_type[1] = THROTTLE_ACTIVE, \
179 .trigger_type[2] = SW_TRIP, \
180 .trigger_type[3] = HW_TRIP, \
181 .max_trigger_level = 4, \
182 .non_hw_trigger_levels = 3, \
183 .gain = 8, \
184 .reference_voltage = 16, \
185 .noise_cancel_mode = 4, \
186 .cal_type = TYPE_ONE_POINT_TRIMMING, \
187 .efuse_value = 55, \
188 .min_efuse_value = 40, \
189 .max_efuse_value = 100, \
190 .first_point_trim = 25, \
191 .second_point_trim = 85, \
192 .default_temp_offset = 50, \
193 .freq_tab[0] = { \
194 .freq_clip_max = 800 * 1000, \
195 .temp_level = 85, \
196 }, \
197 .freq_tab[1] = { \
198 .freq_clip_max = 200 * 1000, \
199 .temp_level = 103, \
200 }, \
201 .freq_tab_count = 2, \
202
203#define EXYNOS5260_TMU_DATA \
204 __EXYNOS5260_TMU_DATA \
205 .type = SOC_ARCH_EXYNOS5260
206
207struct exynos_tmu_init_data const exynos5260_default_tmu_data = {
208 .tmu_data = {
209 { EXYNOS5260_TMU_DATA },
210 { EXYNOS5260_TMU_DATA },
211 { EXYNOS5260_TMU_DATA },
212 { EXYNOS5260_TMU_DATA },
213 { EXYNOS5260_TMU_DATA },
214 },
215 .tmu_count = 5,
216};
217
218#define EXYNOS5420_TMU_DATA \
219 __EXYNOS5260_TMU_DATA \
220 .type = SOC_ARCH_EXYNOS5420
221
222#define EXYNOS5420_TMU_DATA_SHARED \
223 __EXYNOS5260_TMU_DATA \
224 .type = SOC_ARCH_EXYNOS5420_TRIMINFO
225
226struct exynos_tmu_init_data const exynos5420_default_tmu_data = {
227 .tmu_data = {
228 { EXYNOS5420_TMU_DATA },
229 { EXYNOS5420_TMU_DATA },
230 { EXYNOS5420_TMU_DATA_SHARED },
231 { EXYNOS5420_TMU_DATA_SHARED },
232 { EXYNOS5420_TMU_DATA_SHARED },
233 },
234 .tmu_count = 5,
235};
236
237#define EXYNOS5440_TMU_DATA \
238 .trigger_levels[0] = 100, \
239 .trigger_levels[4] = 105, \
240 .trigger_enable[0] = 1, \
241 .trigger_type[0] = SW_TRIP, \
242 .trigger_type[4] = HW_TRIP, \
243 .max_trigger_level = 5, \
244 .non_hw_trigger_levels = 1, \
245 .gain = 5, \
246 .reference_voltage = 16, \
247 .noise_cancel_mode = 4, \
248 .cal_type = TYPE_ONE_POINT_TRIMMING, \
249 .efuse_value = 0x5b2d, \
250 .min_efuse_value = 16, \
251 .max_efuse_value = 76, \
252 .first_point_trim = 25, \
253 .second_point_trim = 70, \
254 .default_temp_offset = 25, \
255 .type = SOC_ARCH_EXYNOS5440
256
257struct exynos_tmu_init_data const exynos5440_default_tmu_data = {
258 .tmu_data = {
259 { EXYNOS5440_TMU_DATA } ,
260 { EXYNOS5440_TMU_DATA } ,
261 { EXYNOS5440_TMU_DATA } ,
262 },
263 .tmu_count = 3,
264};
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index fdd1f523a1ed..5a0f12d08e8b 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -45,7 +45,7 @@
45 * c. if the trend is THERMAL_TREND_RAISE_FULL, do nothing 45 * c. if the trend is THERMAL_TREND_RAISE_FULL, do nothing
46 * d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit, 46 * d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit,
47 * if the cooling state already equals lower limit, 47 * if the cooling state already equals lower limit,
48 * deactive the thermal instance 48 * deactivate the thermal instance
49 */ 49 */
50static unsigned long get_target_state(struct thermal_instance *instance, 50static unsigned long get_target_state(struct thermal_instance *instance,
51 enum thermal_trend trend, bool throttle) 51 enum thermal_trend trend, bool throttle)
@@ -169,7 +169,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
169} 169}
170 170
171/** 171/**
172 * step_wise_throttle - throttles devices asscciated with the given zone 172 * step_wise_throttle - throttles devices associated with the given zone
173 * @tz - thermal_zone_device 173 * @tz - thermal_zone_device
174 * @trip - the trip point 174 * @trip - the trip point
175 * @trip_type - type of the trip point 175 * @trip_type - type of the trip point
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index 634b6ce0e63a..62a5d449c388 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -1402,7 +1402,7 @@ int ti_bandgap_remove(struct platform_device *pdev)
1402 return 0; 1402 return 0;
1403} 1403}
1404 1404
1405#ifdef CONFIG_PM 1405#ifdef CONFIG_PM_SLEEP
1406static int ti_bandgap_save_ctxt(struct ti_bandgap *bgp) 1406static int ti_bandgap_save_ctxt(struct ti_bandgap *bgp)
1407{ 1407{
1408 int i; 1408 int i;
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 3fb054a10f6a..a38c1756442a 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -429,7 +429,7 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id)
429 429
430 data = ti_bandgap_get_sensor_data(bgp, id); 430 data = ti_bandgap_get_sensor_data(bgp, id);
431 431
432 if (data && data->cool_dev) 432 if (data)
433 cpufreq_cooling_unregister(data->cool_dev); 433 cpufreq_cooling_unregister(data->cool_dev);
434 434
435 return 0; 435 return 0;
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 5d916c7a216b..d2501f01cd03 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -489,7 +489,7 @@ config SERIAL_MFD_HSU
489 select SERIAL_CORE 489 select SERIAL_CORE
490 490
491config SERIAL_MFD_HSU_CONSOLE 491config SERIAL_MFD_HSU_CONSOLE
492 boolean "Medfile HSU serial console support" 492 bool "Medfile HSU serial console support"
493 depends on SERIAL_MFD_HSU=y 493 depends on SERIAL_MFD_HSU=y
494 select SERIAL_CORE_CONSOLE 494 select SERIAL_CORE_CONSOLE
495 495
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 96539038c03a..b454d05be583 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -45,7 +45,7 @@ menuconfig USB_GADGET
45if USB_GADGET 45if USB_GADGET
46 46
47config USB_GADGET_DEBUG 47config USB_GADGET_DEBUG
48 boolean "Debugging messages (DEVELOPMENT)" 48 bool "Debugging messages (DEVELOPMENT)"
49 depends on DEBUG_KERNEL 49 depends on DEBUG_KERNEL
50 help 50 help
51 Many controller and gadget drivers will print some debugging 51 Many controller and gadget drivers will print some debugging
@@ -73,7 +73,7 @@ config USB_GADGET_VERBOSE
73 production build. 73 production build.
74 74
75config USB_GADGET_DEBUG_FILES 75config USB_GADGET_DEBUG_FILES
76 boolean "Debugging information files (DEVELOPMENT)" 76 bool "Debugging information files (DEVELOPMENT)"
77 depends on PROC_FS 77 depends on PROC_FS
78 help 78 help
79 Some of the drivers in the "gadget" framework can expose 79 Some of the drivers in the "gadget" framework can expose
@@ -84,7 +84,7 @@ config USB_GADGET_DEBUG_FILES
84 here. If in doubt, or to conserve kernel memory, say "N". 84 here. If in doubt, or to conserve kernel memory, say "N".
85 85
86config USB_GADGET_DEBUG_FS 86config USB_GADGET_DEBUG_FS
87 boolean "Debugging information files in debugfs (DEVELOPMENT)" 87 bool "Debugging information files in debugfs (DEVELOPMENT)"
88 depends on DEBUG_FS 88 depends on DEBUG_FS
89 help 89 help
90 Some of the drivers in the "gadget" framework can expose 90 Some of the drivers in the "gadget" framework can expose
@@ -230,7 +230,7 @@ config USB_CONFIGFS
230 For more information see Documentation/usb/gadget_configfs.txt. 230 For more information see Documentation/usb/gadget_configfs.txt.
231 231
232config USB_CONFIGFS_SERIAL 232config USB_CONFIGFS_SERIAL
233 boolean "Generic serial bulk in/out" 233 bool "Generic serial bulk in/out"
234 depends on USB_CONFIGFS 234 depends on USB_CONFIGFS
235 depends on TTY 235 depends on TTY
236 select USB_U_SERIAL 236 select USB_U_SERIAL
@@ -239,7 +239,7 @@ config USB_CONFIGFS_SERIAL
239 The function talks to the Linux-USB generic serial driver. 239 The function talks to the Linux-USB generic serial driver.
240 240
241config USB_CONFIGFS_ACM 241config USB_CONFIGFS_ACM
242 boolean "Abstract Control Model (CDC ACM)" 242 bool "Abstract Control Model (CDC ACM)"
243 depends on USB_CONFIGFS 243 depends on USB_CONFIGFS
244 depends on TTY 244 depends on TTY
245 select USB_U_SERIAL 245 select USB_U_SERIAL
@@ -249,7 +249,7 @@ config USB_CONFIGFS_ACM
249 MS-Windows hosts or with the Linux-USB "cdc-acm" driver. 249 MS-Windows hosts or with the Linux-USB "cdc-acm" driver.
250 250
251config USB_CONFIGFS_OBEX 251config USB_CONFIGFS_OBEX
252 boolean "Object Exchange Model (CDC OBEX)" 252 bool "Object Exchange Model (CDC OBEX)"
253 depends on USB_CONFIGFS 253 depends on USB_CONFIGFS
254 depends on TTY 254 depends on TTY
255 select USB_U_SERIAL 255 select USB_U_SERIAL
@@ -259,7 +259,7 @@ config USB_CONFIGFS_OBEX
259 since the kernel itself doesn't implement the OBEX protocol. 259 since the kernel itself doesn't implement the OBEX protocol.
260 260
261config USB_CONFIGFS_NCM 261config USB_CONFIGFS_NCM
-	boolean "Network Control Model (CDC NCM)"
+	bool "Network Control Model (CDC NCM)"
 	depends on USB_CONFIGFS
 	depends on NET
 	select USB_U_ETHER
@@ -270,7 +270,7 @@ config USB_CONFIGFS_NCM
 	  different alignment possibilities.
 
 config USB_CONFIGFS_ECM
-	boolean "Ethernet Control Model (CDC ECM)"
+	bool "Ethernet Control Model (CDC ECM)"
 	depends on USB_CONFIGFS
 	depends on NET
 	select USB_U_ETHER
@@ -282,7 +282,7 @@ config USB_CONFIGFS_ECM
 	  supported by firmware for smart network devices.
 
 config USB_CONFIGFS_ECM_SUBSET
-	boolean "Ethernet Control Model (CDC ECM) subset"
+	bool "Ethernet Control Model (CDC ECM) subset"
 	depends on USB_CONFIGFS
 	depends on NET
 	select USB_U_ETHER
@@ -323,7 +323,7 @@ config USB_CONFIGFS_EEM
 	  the host is the same (a usbX device), so the differences are minimal.
 
 config USB_CONFIGFS_PHONET
-	boolean "Phonet protocol"
+	bool "Phonet protocol"
 	depends on USB_CONFIGFS
 	depends on NET
 	depends on PHONET
@@ -333,7 +333,7 @@ config USB_CONFIGFS_PHONET
 	  The Phonet protocol implementation for USB device.
 
 config USB_CONFIGFS_MASS_STORAGE
-	boolean "Mass storage"
+	bool "Mass storage"
 	depends on USB_CONFIGFS
 	depends on BLOCK
 	select USB_F_MASS_STORAGE
@@ -344,7 +344,7 @@ config USB_CONFIGFS_MASS_STORAGE
 	  specified as a module parameter or sysfs option.
 
 config USB_CONFIGFS_F_LB_SS
-	boolean "Loopback and sourcesink function (for testing)"
+	bool "Loopback and sourcesink function (for testing)"
 	depends on USB_CONFIGFS
 	select USB_F_SS_LB
 	help
@@ -357,7 +357,7 @@ config USB_CONFIGFS_F_LB_SS
 	  and its driver through a basic set of functional tests.
 
 config USB_CONFIGFS_F_FS
-	boolean "Function filesystem (FunctionFS)"
+	bool "Function filesystem (FunctionFS)"
 	depends on USB_CONFIGFS
 	select USB_F_FS
 	help
@@ -369,7 +369,7 @@ config USB_CONFIGFS_F_FS
 	  mass storage) and other are implemented in user space.
 
 config USB_CONFIGFS_F_UAC1
-	boolean "Audio Class 1.0"
+	bool "Audio Class 1.0"
 	depends on USB_CONFIGFS
 	depends on SND
 	select USB_LIBCOMPOSITE
@@ -382,7 +382,7 @@ config USB_CONFIGFS_F_UAC1
 	  on the device.
 
 config USB_CONFIGFS_F_UAC2
-	boolean "Audio Class 2.0"
+	bool "Audio Class 2.0"
 	depends on USB_CONFIGFS
 	depends on SND
 	select USB_LIBCOMPOSITE
@@ -400,7 +400,7 @@ config USB_CONFIGFS_F_UAC2
 	  wants as audio data to the USB Host.
 
 config USB_CONFIGFS_F_MIDI
-	boolean "MIDI function"
+	bool "MIDI function"
 	depends on USB_CONFIGFS
 	depends on SND
 	select USB_LIBCOMPOSITE
@@ -414,7 +414,7 @@ config USB_CONFIGFS_F_MIDI
 	  ALSA's aconnect utility etc.
 
 config USB_CONFIGFS_F_HID
-	boolean "HID function"
+	bool "HID function"
 	depends on USB_CONFIGFS
 	select USB_F_HID
 	help
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index fd48ef3af4eb..113c87e22117 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -40,7 +40,7 @@ config USB_ZERO
 	  dynamically linked module called "g_zero".
 
 config USB_ZERO_HNPTEST
-	boolean "HNP Test Device"
+	bool "HNP Test Device"
 	depends on USB_ZERO && USB_OTG
 	help
 	  You can configure this device to enumerate using the device
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 366e551aeff0..9a3a6b00391a 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -199,7 +199,7 @@ config USB_S3C2410
 	  S3C2440 processors.
 
 config USB_S3C2410_DEBUG
-	boolean "S3C2410 udc debug messages"
+	bool "S3C2410 udc debug messages"
 	depends on USB_S3C2410
 
 config USB_S3C_HSUDC
@@ -288,7 +288,7 @@ config USB_NET2272
 	  gadget drivers to also be dynamically linked.
 
 config USB_NET2272_DMA
-	boolean "Support external DMA controller"
+	bool "Support external DMA controller"
 	depends on USB_NET2272 && HAS_DMA
 	help
 	  The NET2272 part can optionally support an external DMA
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index c6d0c8e745b9..52d3d58252e1 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -119,7 +119,7 @@ config TAHVO_USB
 
 config TAHVO_USB_HOST_BY_DEFAULT
 	depends on TAHVO_USB
-	boolean "Device in USB host mode by default"
+	bool "Device in USB host mode by default"
 	help
 	  Say Y here, if you want the device to enter USB host mode
 	  by default on bootup.
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 7cc0122a18ce..f8a186381ae8 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -239,9 +239,12 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
 
 			return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
 		}
-	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX)
+	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
 		if (pci_is_pcie(vdev->pdev))
 			return 1;
+	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
+		return 1;
+	}
 
 	return 0;
 }
@@ -464,6 +467,7 @@ static long vfio_pci_ioctl(void *device_data,
 
 		switch (info.index) {
 		case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
+		case VFIO_PCI_REQ_IRQ_INDEX:
 			break;
 		case VFIO_PCI_ERR_IRQ_INDEX:
 			if (pci_is_pcie(vdev->pdev))
@@ -828,6 +832,20 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
 			       req_len, vma->vm_page_prot);
 }
 
+static void vfio_pci_request(void *device_data, unsigned int count)
+{
+	struct vfio_pci_device *vdev = device_data;
+
+	mutex_lock(&vdev->igate);
+
+	if (vdev->req_trigger) {
+		dev_dbg(&vdev->pdev->dev, "Requesting device from user\n");
+		eventfd_signal(vdev->req_trigger, 1);
+	}
+
+	mutex_unlock(&vdev->igate);
+}
+
 static const struct vfio_device_ops vfio_pci_ops = {
 	.name		= "vfio-pci",
 	.open		= vfio_pci_open,
@@ -836,6 +854,7 @@ static const struct vfio_device_ops vfio_pci_ops = {
 	.read		= vfio_pci_read,
 	.write		= vfio_pci_write,
 	.mmap		= vfio_pci_mmap,
+	.request	= vfio_pci_request,
 };
 
 static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
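
The VFIO_PCI_REQ_IRQ_INDEX introduced above is reported through VFIO_DEVICE_GET_IRQ_INFO like any other index and is armed with the existing VFIO_DEVICE_SET_IRQS ioctl. A minimal userspace sketch of that registration, assuming device_fd is an already-open VFIO device descriptor and omitting error handling:

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/eventfd.h>
	#include <sys/ioctl.h>
	#include <linux/vfio.h>

	/* Register an eventfd that the kernel signals when it wants the
	 * device back; device_fd is an open VFIO device file descriptor. */
	static int register_req_eventfd(int device_fd)
	{
		int efd = eventfd(0, EFD_CLOEXEC);
		size_t argsz = sizeof(struct vfio_irq_set) + sizeof(int32_t);
		struct vfio_irq_set *set = calloc(1, argsz);

		set->argsz = argsz;
		set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
			     VFIO_IRQ_SET_ACTION_TRIGGER;
		set->index = VFIO_PCI_REQ_IRQ_INDEX;
		set->start = 0;
		set->count = 1;	/* vfio_pci_set_req_trigger() insists on 0/1 */
		memcpy(set->data, &efd, sizeof(int32_t));

		ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
		free(set);
		return efd;
	}

Passing fd == -1 through the same call tears the trigger back down, mirroring the EVENTFD handling in vfio_pci_set_ctx_trigger_single() below.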
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index e8d695b3f54e..f88bfdf5b6a0 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -763,46 +763,70 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
 	return 0;
 }
 
-static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
-				    unsigned index, unsigned start,
-				    unsigned count, uint32_t flags, void *data)
+static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
+					   uint32_t flags, void *data)
 {
 	int32_t fd = *(int32_t *)data;
 
-	if ((index != VFIO_PCI_ERR_IRQ_INDEX) ||
-	    !(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
+	if (!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
 		return -EINVAL;
 
 	/* DATA_NONE/DATA_BOOL enables loopback testing */
 	if (flags & VFIO_IRQ_SET_DATA_NONE) {
-		if (vdev->err_trigger)
-			eventfd_signal(vdev->err_trigger, 1);
+		if (*ctx)
+			eventfd_signal(*ctx, 1);
 		return 0;
 	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
 		uint8_t trigger = *(uint8_t *)data;
-		if (trigger && vdev->err_trigger)
-			eventfd_signal(vdev->err_trigger, 1);
+		if (trigger && *ctx)
+			eventfd_signal(*ctx, 1);
 		return 0;
 	}
 
 	/* Handle SET_DATA_EVENTFD */
 	if (fd == -1) {
-		if (vdev->err_trigger)
-			eventfd_ctx_put(vdev->err_trigger);
-		vdev->err_trigger = NULL;
+		if (*ctx)
+			eventfd_ctx_put(*ctx);
+		*ctx = NULL;
 		return 0;
 	} else if (fd >= 0) {
 		struct eventfd_ctx *efdctx;
 		efdctx = eventfd_ctx_fdget(fd);
 		if (IS_ERR(efdctx))
 			return PTR_ERR(efdctx);
-		if (vdev->err_trigger)
-			eventfd_ctx_put(vdev->err_trigger);
-		vdev->err_trigger = efdctx;
+		if (*ctx)
+			eventfd_ctx_put(*ctx);
+		*ctx = efdctx;
 		return 0;
 	} else
 		return -EINVAL;
 }
+
+static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
+				    unsigned index, unsigned start,
+				    unsigned count, uint32_t flags, void *data)
+{
+	if (index != VFIO_PCI_ERR_IRQ_INDEX)
+		return -EINVAL;
+
+	/*
+	 * We should sanitize start & count, but that wasn't caught
+	 * originally, so this IRQ index must forever ignore them :-(
+	 */
+
+	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data);
+}
+
+static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
+				    unsigned index, unsigned start,
+				    unsigned count, uint32_t flags, void *data)
+{
+	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count != 1)
+		return -EINVAL;
+
+	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, flags, data);
+}
+
 int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
 			    unsigned index, unsigned start, unsigned count,
 			    void *data)
@@ -844,6 +868,12 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
 			func = vfio_pci_set_err_trigger;
 			break;
 		}
+	case VFIO_PCI_REQ_IRQ_INDEX:
+		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+		case VFIO_IRQ_SET_ACTION_TRIGGER:
+			func = vfio_pci_set_req_trigger;
+			break;
+		}
 	}
 
 	if (!func)
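
Because vfio_pci_set_ctx_trigger_single() keeps the DATA_NONE/DATA_BOOL loopback paths, the err and req triggers can be fired from userspace without any device activity, which is handy for testing a registered eventfd end to end. A hedged sketch, reusing the same assumed device_fd as above:

	#include <sys/ioctl.h>
	#include <linux/vfio.h>

	/* Fire the previously registered request eventfd with no device
	 * activity; the kernel signals it via the DATA_NONE loopback. */
	static void kick_req_trigger(int device_fd)
	{
		struct vfio_irq_set set = {
			.argsz = sizeof(set),
			.flags = VFIO_IRQ_SET_DATA_NONE |
				 VFIO_IRQ_SET_ACTION_TRIGGER,
			.index = VFIO_PCI_REQ_IRQ_INDEX,
			.start = 0,
			.count = 1,
		};

		ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &set);
	}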
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 671c17a6e6d0..c9f9b323f152 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -58,6 +58,7 @@ struct vfio_pci_device {
 	struct pci_saved_state	*pci_saved_state;
 	int			refcnt;
 	struct eventfd_ctx	*err_trigger;
+	struct eventfd_ctx	*req_trigger;
 };
 
 #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index f018d8d0f975..4cde85501444 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -63,6 +63,11 @@ struct vfio_container {
 	void			*iommu_data;
 };
 
+struct vfio_unbound_dev {
+	struct device		*dev;
+	struct list_head	unbound_next;
+};
+
 struct vfio_group {
 	struct kref			kref;
 	int				minor;
@@ -75,6 +80,8 @@ struct vfio_group {
 	struct notifier_block		nb;
 	struct list_head		vfio_next;
 	struct list_head		container_next;
+	struct list_head		unbound_list;
+	struct mutex			unbound_lock;
 	atomic_t			opened;
 };
 
@@ -204,6 +211,8 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
 	kref_init(&group->kref);
 	INIT_LIST_HEAD(&group->device_list);
 	mutex_init(&group->device_lock);
+	INIT_LIST_HEAD(&group->unbound_list);
+	mutex_init(&group->unbound_lock);
 	atomic_set(&group->container_users, 0);
 	atomic_set(&group->opened, 0);
 	group->iommu_group = iommu_group;
@@ -264,13 +273,22 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
 static void vfio_group_release(struct kref *kref)
 {
 	struct vfio_group *group = container_of(kref, struct vfio_group, kref);
+	struct vfio_unbound_dev *unbound, *tmp;
+	struct iommu_group *iommu_group = group->iommu_group;
 
 	WARN_ON(!list_empty(&group->device_list));
 
+	list_for_each_entry_safe(unbound, tmp,
+				 &group->unbound_list, unbound_next) {
+		list_del(&unbound->unbound_next);
+		kfree(unbound);
+	}
+
 	device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
 	list_del(&group->vfio_next);
 	vfio_free_group_minor(group->minor);
 	vfio_group_unlock_and_free(group);
+	iommu_group_put(iommu_group);
 }
 
 static void vfio_group_put(struct vfio_group *group)
@@ -440,17 +458,36 @@ static bool vfio_whitelisted_driver(struct device_driver *drv)
 }
 
 /*
- * A vfio group is viable for use by userspace if all devices are either
- * driver-less or bound to a vfio or whitelisted driver.  We test the
- * latter by the existence of a struct vfio_device matching the dev.
+ * A vfio group is viable for use by userspace if all devices are in
+ * one of the following states:
+ *  - driver-less
+ *  - bound to a vfio driver
+ *  - bound to a whitelisted driver
+ *
+ * We use two methods to determine whether a device is bound to a vfio
+ * driver.  The first is to test whether the device exists in the vfio
+ * group.  The second is to test if the device exists on the group
+ * unbound_list, indicating it's in the middle of transitioning from
+ * a vfio driver to driver-less.
  */
 static int vfio_dev_viable(struct device *dev, void *data)
 {
 	struct vfio_group *group = data;
 	struct vfio_device *device;
 	struct device_driver *drv = ACCESS_ONCE(dev->driver);
+	struct vfio_unbound_dev *unbound;
+	int ret = -EINVAL;
 
-	if (!drv || vfio_whitelisted_driver(drv))
+	mutex_lock(&group->unbound_lock);
+	list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
+		if (dev == unbound->dev) {
+			ret = 0;
+			break;
+		}
+	}
+	mutex_unlock(&group->unbound_lock);
+
+	if (!ret || !drv || vfio_whitelisted_driver(drv))
 		return 0;
 
 	device = vfio_group_get_device(group, dev);
@@ -459,7 +496,7 @@ static int vfio_dev_viable(struct device *dev, void *data)
 		return 0;
 	}
 
-	return -EINVAL;
+	return ret;
 }
 
 /**
@@ -501,6 +538,7 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
 {
 	struct vfio_group *group = container_of(nb, struct vfio_group, nb);
 	struct device *dev = data;
+	struct vfio_unbound_dev *unbound;
 
 	/*
 	 * Need to go through a group_lock lookup to get a reference or we
@@ -550,6 +588,17 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
 		 * stop the system to maintain isolation.  At a minimum, we'd
 		 * want a toggle to disable driver auto probe for this device.
 		 */
+
+		mutex_lock(&group->unbound_lock);
+		list_for_each_entry(unbound,
+				    &group->unbound_list, unbound_next) {
+			if (dev == unbound->dev) {
+				list_del(&unbound->unbound_next);
+				kfree(unbound);
+				break;
+			}
+		}
+		mutex_unlock(&group->unbound_lock);
 		break;
 	}
 
@@ -578,6 +627,12 @@ int vfio_add_group_dev(struct device *dev,
 			iommu_group_put(iommu_group);
 			return PTR_ERR(group);
 		}
+	} else {
+		/*
+		 * A found vfio_group already holds a reference to the
+		 * iommu_group.  A created vfio_group keeps the reference.
+		 */
+		iommu_group_put(iommu_group);
 	}
 
 	device = vfio_group_get_device(group, dev);
@@ -586,21 +641,19 @@ int vfio_add_group_dev(struct device *dev,
 		     dev_name(dev), iommu_group_id(iommu_group));
 		vfio_device_put(device);
 		vfio_group_put(group);
-		iommu_group_put(iommu_group);
 		return -EBUSY;
 	}
 
 	device = vfio_group_create_device(group, dev, ops, device_data);
 	if (IS_ERR(device)) {
 		vfio_group_put(group);
-		iommu_group_put(iommu_group);
 		return PTR_ERR(device);
 	}
 
 	/*
-	 * Added device holds reference to iommu_group and vfio_device
-	 * (which in turn holds reference to vfio_group).  Drop extra
-	 * group reference used while acquiring device.
+	 * Drop all but the vfio_device reference.  The vfio_device holds
+	 * a reference to the vfio_group, which holds a reference to the
+	 * iommu_group.
 	 */
 	vfio_group_put(group);
 
@@ -655,8 +708,9 @@ void *vfio_del_group_dev(struct device *dev)
 {
 	struct vfio_device *device = dev_get_drvdata(dev);
 	struct vfio_group *group = device->group;
-	struct iommu_group *iommu_group = group->iommu_group;
 	void *device_data = device->device_data;
+	struct vfio_unbound_dev *unbound;
+	unsigned int i = 0;
 
 	/*
 	 * The group exists so long as we have a device reference.  Get
@@ -664,14 +718,49 @@ void *vfio_del_group_dev(struct device *dev)
 	 */
 	vfio_group_get(group);
 
+	/*
+	 * When the device is removed from the group, the group suddenly
+	 * becomes non-viable; the device has a driver (until the unbind
+	 * completes), but it's not present in the group.  This is bad news
+	 * for any external users that need to re-acquire a group reference
+	 * in order to match and release their existing reference.  To
+	 * solve this, we track such devices on the unbound_list to bridge
+	 * the gap until they're fully unbound.
+	 */
+	unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
+	if (unbound) {
+		unbound->dev = dev;
+		mutex_lock(&group->unbound_lock);
+		list_add(&unbound->unbound_next, &group->unbound_list);
+		mutex_unlock(&group->unbound_lock);
+	}
+	WARN_ON(!unbound);
+
 	vfio_device_put(device);
 
-	/* TODO send a signal to encourage this to be released */
-	wait_event(vfio.release_q, !vfio_dev_present(group, dev));
+	/*
+	 * If the device is still present in the group after the above
+	 * 'put', then it is in use and we need to request it from the
+	 * bus driver.  The driver may in turn need to request the
+	 * device from the user.  We send the request on an arbitrary
+	 * interval with counter to allow the driver to take escalating
+	 * measures to release the device if it has the ability to do so.
+	 */
+	do {
+		device = vfio_group_get_device(group, dev);
+		if (!device)
+			break;
 
-	vfio_group_put(group);
+		if (device->ops->request)
+			device->ops->request(device_data, i++);
 
-	iommu_group_put(iommu_group);
+		vfio_device_put(device);
+
+	} while (wait_event_interruptible_timeout(vfio.release_q,
+						  !vfio_dev_present(group, dev),
+						  HZ * 10) <= 0);
+
+	vfio_group_put(group);
 
 	return device_data;
 }
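
The counterpart to this escalating request loop lives in userspace: a process that registered a request eventfd is expected to notice the signal and drop its device references, so that vfio_dev_present() finally fails and the wait above completes instead of re-requesting every ten seconds. A rough sketch of such a consumer, with hypothetical structure that is not part of any kernel or library API:

	#include <poll.h>
	#include <stdint.h>
	#include <unistd.h>

	/* efd is the eventfd registered for VFIO_PCI_REQ_IRQ_INDEX;
	 * returns once the kernel has asked for the device back. */
	static void wait_for_device_request(int efd, int device_fd)
	{
		struct pollfd pfd = { .fd = efd, .events = POLLIN };
		uint64_t count;

		poll(&pfd, 1, -1);
		read(efd, &count, sizeof(count));	/* consume the signal */

		/* Quiesce and release the device; closing the fd drops the
		 * vfio_device reference that vfio_del_group_dev() waits on. */
		close(device_fd);
	}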
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 4a9d666f1e91..57d8c37a002b 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -66,6 +66,7 @@ struct vfio_domain {
 	struct list_head	next;
 	struct list_head	group_list;
 	int			prot;		/* IOMMU_CACHE */
+	bool			fgsp;		/* Fine-grained super pages */
 };
 
 struct vfio_dma {
@@ -264,6 +265,7 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
 	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	bool lock_cap = capable(CAP_IPC_LOCK);
 	long ret, i;
+	bool rsvd;
 
 	if (!current->mm)
 		return -ENODEV;
@@ -272,10 +274,9 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
 	if (ret)
 		return ret;
 
-	if (is_invalid_reserved_pfn(*pfn_base))
-		return 1;
+	rsvd = is_invalid_reserved_pfn(*pfn_base);
 
-	if (!lock_cap && current->mm->locked_vm + 1 > limit) {
+	if (!rsvd && !lock_cap && current->mm->locked_vm + 1 > limit) {
 		put_pfn(*pfn_base, prot);
 		pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
 			limit << PAGE_SHIFT);
@@ -283,7 +284,8 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
 	}
 
 	if (unlikely(disable_hugepages)) {
-		vfio_lock_acct(1);
+		if (!rsvd)
+			vfio_lock_acct(1);
 		return 1;
 	}
 
@@ -295,12 +297,14 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
 		if (ret)
 			break;
 
-		if (pfn != *pfn_base + i || is_invalid_reserved_pfn(pfn)) {
+		if (pfn != *pfn_base + i ||
+		    rsvd != is_invalid_reserved_pfn(pfn)) {
 			put_pfn(pfn, prot);
 			break;
 		}
 
-		if (!lock_cap && current->mm->locked_vm + i + 1 > limit) {
+		if (!rsvd && !lock_cap &&
+		    current->mm->locked_vm + i + 1 > limit) {
 			put_pfn(pfn, prot);
 			pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
 				__func__, limit << PAGE_SHIFT);
@@ -308,7 +312,8 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
 		}
 	}
 
-	vfio_lock_acct(i);
+	if (!rsvd)
+		vfio_lock_acct(i);
 
 	return i;
 }
@@ -346,12 +351,14 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
 	domain = d = list_first_entry(&iommu->domain_list,
 				      struct vfio_domain, next);
 
-	list_for_each_entry_continue(d, &iommu->domain_list, next)
+	list_for_each_entry_continue(d, &iommu->domain_list, next) {
 		iommu_unmap(d->domain, dma->iova, dma->size);
+		cond_resched();
+	}
 
 	while (iova < end) {
-		size_t unmapped;
-		phys_addr_t phys;
+		size_t unmapped, len;
+		phys_addr_t phys, next;
 
 		phys = iommu_iova_to_phys(domain->domain, iova);
 		if (WARN_ON(!phys)) {
@@ -359,7 +366,19 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
 			continue;
 		}
 
-		unmapped = iommu_unmap(domain->domain, iova, PAGE_SIZE);
+		/*
+		 * To optimize for fewer iommu_unmap() calls, each of which
+		 * may require hardware cache flushing, try to find the
+		 * largest contiguous physical memory chunk to unmap.
+		 */
+		for (len = PAGE_SIZE;
+		     !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
+			next = iommu_iova_to_phys(domain->domain, iova + len);
+			if (next != phys + len)
+				break;
+		}
+
+		unmapped = iommu_unmap(domain->domain, iova, len);
 		if (WARN_ON(!unmapped))
 			break;
 
@@ -367,6 +386,8 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
 					     unmapped >> PAGE_SHIFT,
 					     dma->prot, false);
 		iova += unmapped;
+
+		cond_resched();
 	}
 
 	vfio_lock_acct(-unlocked);
@@ -511,6 +532,8 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
 			    map_try_harder(d, iova, pfn, npage, prot))
 				goto unwind;
 		}
+
+		cond_resched();
 	}
 
 	return 0;
@@ -665,6 +688,39 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 	return 0;
 }
 
+/*
+ * We change our unmap behavior slightly depending on whether the IOMMU
+ * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
+ * for practically any contiguous power-of-two mapping we give it.  This means
+ * we don't need to look for contiguous chunks ourselves to make unmapping
+ * more efficient.  On IOMMUs with coarse-grained super pages, like Intel VT-d
+ * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
+ * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
+ * hugetlbfs is in use.
+ */
+static void vfio_test_domain_fgsp(struct vfio_domain *domain)
+{
+	struct page *pages;
+	int ret, order = get_order(PAGE_SIZE * 2);
+
+	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+	if (!pages)
+		return;
+
+	ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
+			IOMMU_READ | IOMMU_WRITE | domain->prot);
+	if (!ret) {
+		size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
+
+		if (unmapped == PAGE_SIZE)
+			iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
+		else
+			domain->fgsp = true;
+	}
+
+	__free_pages(pages, order);
+}
+
 static int vfio_iommu_type1_attach_group(void *iommu_data,
 					 struct iommu_group *iommu_group)
 {
@@ -758,6 +814,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 		}
 	}
 
+	vfio_test_domain_fgsp(domain);
+
 	/* replay mappings on new domains */
 	ret = vfio_iommu_replay(iommu, domain);
 	if (ret)
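
For context, vfio_pin_pages() above runs under the VFIO_IOMMU_MAP_DMA ioctl of this type1 backend, and the rsvd handling means reserved/invalid PFNs (such as device MMIO mapped into the process) are now mapped without being charged against RLIMIT_MEMLOCK. A sketch of the userspace call that drives this path, assuming container_fd is a VFIO container with the type1 IOMMU already enabled:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/vfio.h>

	/* Map one page of anonymous memory at IOVA 0; vfio_pin_pages()
	 * pins it and accounts it to the caller's RLIMIT_MEMLOCK. */
	static int map_one_page(int container_fd)
	{
		struct vfio_iommu_type1_dma_map map = {
			.argsz = sizeof(map),
			.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
			.iova = 0,
			.size = 4096,	/* assumes 4K pages for brevity */
		};
		void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		map.vaddr = (uintptr_t)buf;
		return ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
	}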
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 633012cc9a57..18f05bff8826 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -591,11 +591,6 @@ static void handle_rx(struct vhost_net *net)
 			 * TODO: support TSO.
 			 */
 			iov_iter_advance(&msg.msg_iter, vhost_hlen);
-		} else {
-			/* It'll come from socket; we'll need to patch
-			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
-			 */
-			iov_iter_advance(&fixup, sizeof(hdr));
 		}
 		err = sock->ops->recvmsg(sock, &msg,
 					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
@@ -609,17 +604,25 @@ static void handle_rx(struct vhost_net *net)
 			continue;
 		}
 		/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
-		if (unlikely(vhost_hlen) &&
-		    copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) {
-			vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
-			       vq->iov->iov_base);
-			break;
+		if (unlikely(vhost_hlen)) {
+			if (copy_to_iter(&hdr, sizeof(hdr),
+					 &fixup) != sizeof(hdr)) {
+				vq_err(vq, "Unable to write vnet_hdr "
+				       "at addr %p\n", vq->iov->iov_base);
+				break;
+			}
+		} else {
+			/* Header came from socket; we'll need to patch
+			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
+			 */
+			iov_iter_advance(&fixup, sizeof(hdr));
 		}
 		/* TODO: Should check and handle checksum. */
 
 		num_buffers = cpu_to_vhost16(vq, headcount);
 		if (likely(mergeable) &&
-		    copy_to_iter(&num_buffers, 2, &fixup) != 2) {
+		    copy_to_iter(&num_buffers, sizeof num_buffers,
+				 &fixup) != sizeof num_buffers) {
 			vq_err(vq, "Failed num_buffers write");
 			vhost_discard_vq_desc(vq, headcount);
 			break;
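
The fixup iterator in this hunk exists because, with mergeable receive buffers, num_buffers sits inside the guest-visible header, which is either synthesized by vhost or already written by the socket; either way it can only be patched once the descriptor count is known. For reference, the layout being patched (illustration only; the real definition is struct virtio_net_hdr_mrg_rxbuf in <linux/virtio_net.h>):

	#include <linux/virtio_types.h>
	#include <linux/virtio_net.h>

	/* Mirrors struct virtio_net_hdr_mrg_rxbuf from <linux/virtio_net.h>,
	 * the header the guest sees when VIRTIO_NET_F_MRG_RXBUF is
	 * negotiated; renamed here only to avoid redefinition. */
	struct mrg_rxbuf_hdr_example {
		struct virtio_net_hdr hdr;	/* flags, gso_type, hdr_len, ... */
		__virtio16 num_buffers;		/* patched through the fixup iter */
	};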
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index dc78d87e0fc2..8d4f3f1ff799 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -38,7 +38,6 @@
 #include <linux/miscdevice.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
-#include <scsi/scsi_tcq.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
@@ -52,13 +51,13 @@
 
 #include "vhost.h"
 
-#define TCM_VHOST_VERSION "v0.1"
-#define TCM_VHOST_NAMELEN 256
-#define TCM_VHOST_MAX_CDB_SIZE 32
-#define TCM_VHOST_DEFAULT_TAGS 256
-#define TCM_VHOST_PREALLOC_SGLS 2048
-#define TCM_VHOST_PREALLOC_UPAGES 2048
-#define TCM_VHOST_PREALLOC_PROT_SGLS 512
+#define VHOST_SCSI_VERSION "v0.1"
+#define VHOST_SCSI_NAMELEN 256
+#define VHOST_SCSI_MAX_CDB_SIZE 32
+#define VHOST_SCSI_DEFAULT_TAGS 256
+#define VHOST_SCSI_PREALLOC_SGLS 2048
+#define VHOST_SCSI_PREALLOC_UPAGES 2048
+#define VHOST_SCSI_PREALLOC_PROT_SGLS 512
 
 struct vhost_scsi_inflight {
 	/* Wait for the flush operation to finish */
@@ -67,11 +66,13 @@ struct vhost_scsi_inflight {
 	struct kref kref;
 };
 
-struct tcm_vhost_cmd {
+struct vhost_scsi_cmd {
 	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
 	int tvc_vq_desc;
 	/* virtio-scsi initiator task attribute */
 	int tvc_task_attr;
+	/* virtio-scsi response incoming iovecs */
+	int tvc_in_iovs;
 	/* virtio-scsi initiator data direction */
 	enum dma_data_direction tvc_data_direction;
 	/* Expected data transfer length from virtio-scsi header */
@@ -81,26 +82,26 @@ struct tcm_vhost_cmd {
 	/* The number of scatterlists associated with this cmd */
 	u32 tvc_sgl_count;
 	u32 tvc_prot_sgl_count;
-	/* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
+	/* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
 	u32 tvc_lun;
 	/* Pointer to the SGL formatted memory from virtio-scsi */
 	struct scatterlist *tvc_sgl;
 	struct scatterlist *tvc_prot_sgl;
 	struct page **tvc_upages;
-	/* Pointer to response */
-	struct virtio_scsi_cmd_resp __user *tvc_resp;
+	/* Pointer to response header iovec */
+	struct iovec *tvc_resp_iov;
 	/* Pointer to vhost_scsi for our device */
 	struct vhost_scsi *tvc_vhost;
 	/* Pointer to vhost_virtqueue for the cmd */
 	struct vhost_virtqueue *tvc_vq;
 	/* Pointer to vhost nexus memory */
-	struct tcm_vhost_nexus *tvc_nexus;
+	struct vhost_scsi_nexus *tvc_nexus;
 	/* The TCM I/O descriptor that is accessed via container_of() */
 	struct se_cmd tvc_se_cmd;
-	/* work item used for cmwq dispatch to tcm_vhost_submission_work() */
+	/* work item used for cmwq dispatch to vhost_scsi_submission_work() */
 	struct work_struct work;
 	/* Copy of the incoming SCSI command descriptor block (CDB) */
-	unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
+	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
 	/* Sense buffer that will be mapped into outgoing status */
 	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
 	/* Completed commands list, serviced from vhost worker thread */
@@ -109,53 +110,53 @@ struct tcm_vhost_cmd {
 	struct vhost_scsi_inflight *inflight;
 };
 
-struct tcm_vhost_nexus {
+struct vhost_scsi_nexus {
 	/* Pointer to TCM session for I_T Nexus */
 	struct se_session *tvn_se_sess;
 };
 
-struct tcm_vhost_nacl {
+struct vhost_scsi_nacl {
 	/* Binary World Wide unique Port Name for Vhost Initiator port */
 	u64 iport_wwpn;
 	/* ASCII formatted WWPN for Sas Initiator port */
-	char iport_name[TCM_VHOST_NAMELEN];
-	/* Returned by tcm_vhost_make_nodeacl() */
+	char iport_name[VHOST_SCSI_NAMELEN];
+	/* Returned by vhost_scsi_make_nodeacl() */
 	struct se_node_acl se_node_acl;
 };
 
-struct tcm_vhost_tpg {
+struct vhost_scsi_tpg {
 	/* Vhost port target portal group tag for TCM */
 	u16 tport_tpgt;
 	/* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */
 	int tv_tpg_port_count;
 	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
 	int tv_tpg_vhost_count;
-	/* list for tcm_vhost_list */
+	/* list for vhost_scsi_list */
 	struct list_head tv_tpg_list;
 	/* Used to protect access for tpg_nexus */
 	struct mutex tv_tpg_mutex;
 	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
-	struct tcm_vhost_nexus *tpg_nexus;
-	/* Pointer back to tcm_vhost_tport */
-	struct tcm_vhost_tport *tport;
-	/* Returned by tcm_vhost_make_tpg() */
+	struct vhost_scsi_nexus *tpg_nexus;
+	/* Pointer back to vhost_scsi_tport */
+	struct vhost_scsi_tport *tport;
+	/* Returned by vhost_scsi_make_tpg() */
 	struct se_portal_group se_tpg;
 	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
 	struct vhost_scsi *vhost_scsi;
 };
 
-struct tcm_vhost_tport {
+struct vhost_scsi_tport {
 	/* SCSI protocol the tport is providing */
 	u8 tport_proto_id;
 	/* Binary World Wide unique Port Name for Vhost Target port */
 	u64 tport_wwpn;
 	/* ASCII formatted WWPN for Vhost Target port */
-	char tport_name[TCM_VHOST_NAMELEN];
-	/* Returned by tcm_vhost_make_tport() */
+	char tport_name[VHOST_SCSI_NAMELEN];
+	/* Returned by vhost_scsi_make_tport() */
 	struct se_wwn tport_wwn;
 };
 
-struct tcm_vhost_evt {
+struct vhost_scsi_evt {
 	/* event to be sent to guest */
 	struct virtio_scsi_event event;
 	/* event list, serviced from vhost worker thread */
@@ -171,7 +172,9 @@ enum {
 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
 enum {
 	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
-					       (1ULL << VIRTIO_SCSI_F_T10_PI)
+					       (1ULL << VIRTIO_SCSI_F_T10_PI) |
+					       (1ULL << VIRTIO_F_ANY_LAYOUT) |
+					       (1ULL << VIRTIO_F_VERSION_1)
 };
 
 #define VHOST_SCSI_MAX_TARGET	256
@@ -195,7 +198,7 @@ struct vhost_scsi_virtqueue {
 
 struct vhost_scsi {
 	/* Protected by vhost_scsi->dev.mutex */
-	struct tcm_vhost_tpg **vs_tpg;
+	struct vhost_scsi_tpg **vs_tpg;
 	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
 
 	struct vhost_dev dev;
@@ -212,21 +215,21 @@ struct vhost_scsi {
 };
 
 /* Local pointer to allocated TCM configfs fabric module */
-static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
+static struct target_fabric_configfs *vhost_scsi_fabric_configfs;
 
-static struct workqueue_struct *tcm_vhost_workqueue;
+static struct workqueue_struct *vhost_scsi_workqueue;
 
-/* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */
-static DEFINE_MUTEX(tcm_vhost_mutex);
-static LIST_HEAD(tcm_vhost_list);
+/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
+static DEFINE_MUTEX(vhost_scsi_mutex);
+static LIST_HEAD(vhost_scsi_list);
 
-static int iov_num_pages(struct iovec *iov)
+static int iov_num_pages(void __user *iov_base, size_t iov_len)
 {
-	return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
-	       ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
+	return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
+	       ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
 }
 
-static void tcm_vhost_done_inflight(struct kref *kref)
+static void vhost_scsi_done_inflight(struct kref *kref)
 {
 	struct vhost_scsi_inflight *inflight;
 
@@ -234,7 +237,7 @@ static void tcm_vhost_done_inflight(struct kref *kref)
 	complete(&inflight->comp);
 }
 
-static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
+static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
 				    struct vhost_scsi_inflight *old_inflight[])
 {
 	struct vhost_scsi_inflight *new_inflight;
@@ -262,7 +265,7 @@ static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
 }
 
 static struct vhost_scsi_inflight *
-tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
+vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
 {
 	struct vhost_scsi_inflight *inflight;
 	struct vhost_scsi_virtqueue *svq;
@@ -274,31 +277,31 @@ tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
 	return inflight;
 }
 
-static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
+static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
 {
-	kref_put(&inflight->kref, tcm_vhost_done_inflight);
+	kref_put(&inflight->kref, vhost_scsi_done_inflight);
 }
 
-static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
+static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
 {
 	return 1;
 }
 
-static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
+static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
 {
 	return 0;
 }
 
-static char *tcm_vhost_get_fabric_name(void)
+static char *vhost_scsi_get_fabric_name(void)
 {
 	return "vhost";
 }
 
-static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+static u8 vhost_scsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
 {
-	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-				struct tcm_vhost_tpg, se_tpg);
-	struct tcm_vhost_tport *tport = tpg->tport;
+	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+				struct vhost_scsi_tpg, se_tpg);
+	struct vhost_scsi_tport *tport = tpg->tport;
 
 	switch (tport->tport_proto_id) {
 	case SCSI_PROTOCOL_SAS:
@@ -316,37 +319,37 @@ static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
 	return sas_get_fabric_proto_ident(se_tpg);
 }
 
-static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
+static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
-	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-				struct tcm_vhost_tpg, se_tpg);
-	struct tcm_vhost_tport *tport = tpg->tport;
+	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+				struct vhost_scsi_tpg, se_tpg);
+	struct vhost_scsi_tport *tport = tpg->tport;
 
 	return &tport->tport_name[0];
 }
 
-static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
+static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
 {
-	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-				struct tcm_vhost_tpg, se_tpg);
+	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+				struct vhost_scsi_tpg, se_tpg);
 	return tpg->tport_tpgt;
 }
 
-static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
+static u32 vhost_scsi_get_default_depth(struct se_portal_group *se_tpg)
 {
 	return 1;
 }
 
 static u32
-tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
+vhost_scsi_get_pr_transport_id(struct se_portal_group *se_tpg,
 			      struct se_node_acl *se_nacl,
 			      struct t10_pr_registration *pr_reg,
 			      int *format_code,
 			      unsigned char *buf)
 {
-	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-				struct tcm_vhost_tpg, se_tpg);
-	struct tcm_vhost_tport *tport = tpg->tport;
+	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+				struct vhost_scsi_tpg, se_tpg);
+	struct vhost_scsi_tport *tport = tpg->tport;
 
 	switch (tport->tport_proto_id) {
 	case SCSI_PROTOCOL_SAS:
@@ -369,14 +372,14 @@ tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
 }
 
 static u32
-tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
+vhost_scsi_get_pr_transport_id_len(struct se_portal_group *se_tpg,
 				  struct se_node_acl *se_nacl,
 				  struct t10_pr_registration *pr_reg,
 				  int *format_code)
 {
-	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-				struct tcm_vhost_tpg, se_tpg);
-	struct tcm_vhost_tport *tport = tpg->tport;
+	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+				struct vhost_scsi_tpg, se_tpg);
+	struct vhost_scsi_tport *tport = tpg->tport;
 
 	switch (tport->tport_proto_id) {
 	case SCSI_PROTOCOL_SAS:
@@ -399,14 +402,14 @@ tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
 }
 
 static char *
-tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
+vhost_scsi_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
 				    const char *buf,
 				    u32 *out_tid_len,
 				    char **port_nexus_ptr)
 {
-	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-				struct tcm_vhost_tpg, se_tpg);
-	struct tcm_vhost_tport *tport = tpg->tport;
+	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+				struct vhost_scsi_tpg, se_tpg);
+	struct vhost_scsi_tport *tport = tpg->tport;
 
 	switch (tport->tport_proto_id) {
 	case SCSI_PROTOCOL_SAS:
@@ -429,13 +432,13 @@ tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
 }
 
 static struct se_node_acl *
-tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
+vhost_scsi_alloc_fabric_acl(struct se_portal_group *se_tpg)
 {
-	struct tcm_vhost_nacl *nacl;
+	struct vhost_scsi_nacl *nacl;
 
-	nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
+	nacl = kzalloc(sizeof(struct vhost_scsi_nacl), GFP_KERNEL);
 	if (!nacl) {
-		pr_err("Unable to allocate struct tcm_vhost_nacl\n");
+		pr_err("Unable to allocate struct vhost_scsi_nacl\n");
 		return NULL;
 	}
 
@@ -443,24 +446,24 @@ tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
 }
 
 static void
-tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
+vhost_scsi_release_fabric_acl(struct se_portal_group *se_tpg,
 			     struct se_node_acl *se_nacl)
 {
-	struct tcm_vhost_nacl *nacl = container_of(se_nacl,
-			struct tcm_vhost_nacl, se_node_acl);
+	struct vhost_scsi_nacl *nacl = container_of(se_nacl,
+			struct vhost_scsi_nacl, se_node_acl);
 	kfree(nacl);
 }
 
-static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
+static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
 {
 	return 1;
 }
 
-static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
+static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
 {
-	struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
-				struct tcm_vhost_cmd, tvc_se_cmd);
-	struct se_session *se_sess = se_cmd->se_sess;
+	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
+				struct vhost_scsi_cmd, tvc_se_cmd);
+	struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
 	int i;
 
 	if (tv_cmd->tvc_sgl_count) {
@@ -472,53 +475,53 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
 			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
 	}
 
-	tcm_vhost_put_inflight(tv_cmd->inflight);
+	vhost_scsi_put_inflight(tv_cmd->inflight);
 	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }
 
-static int tcm_vhost_shutdown_session(struct se_session *se_sess)
+static int vhost_scsi_shutdown_session(struct se_session *se_sess)
 {
 	return 0;
 }
 
-static void tcm_vhost_close_session(struct se_session *se_sess)
+static void vhost_scsi_close_session(struct se_session *se_sess)
 {
 	return;
 }
 
-static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
+static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
 {
 	return 0;
 }
 
-static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
+static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
 {
 	/* Go ahead and process the write immediately */
 	target_execute_cmd(se_cmd);
 	return 0;
 }
 
-static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
+static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
 {
 	return 0;
 }
 
-static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
+static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
 {
 	return;
 }
 
-static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
+static u32 vhost_scsi_get_task_tag(struct se_cmd *se_cmd)
 {
 	return 0;
 }
 
-static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
+static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
 {
 	return 0;
 }
 
-static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
+static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
 {
 	struct vhost_scsi *vs = cmd->tvc_vhost;
 
@@ -527,44 +530,44 @@ static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
 	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
 }
 
-static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
+static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
 {
-	struct tcm_vhost_cmd *cmd = container_of(se_cmd,
-				struct tcm_vhost_cmd, tvc_se_cmd);
+	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+				struct vhost_scsi_cmd, tvc_se_cmd);
 	vhost_scsi_complete_cmd(cmd);
 	return 0;
 }
 
-static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
+static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
 {
-	struct tcm_vhost_cmd *cmd = container_of(se_cmd,
-				struct tcm_vhost_cmd, tvc_se_cmd);
+	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+				struct vhost_scsi_cmd, tvc_se_cmd);
 	vhost_scsi_complete_cmd(cmd);
 	return 0;
 }
 
-static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
+static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
 {
 	return;
 }
 
-static void tcm_vhost_aborted_task(struct se_cmd *se_cmd)
+static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
 {
 	return;
 }
 
-static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
+static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
 {
 	vs->vs_events_nr--;
 	kfree(evt);
 }
 
-static struct tcm_vhost_evt *
-tcm_vhost_allocate_evt(struct vhost_scsi *vs,
+static struct vhost_scsi_evt *
+vhost_scsi_allocate_evt(struct vhost_scsi *vs,
 		       u32 event, u32 reason)
 {
 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
-	struct tcm_vhost_evt *evt;
+	struct vhost_scsi_evt *evt;
 
 	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
 		vs->vs_events_missed = true;
@@ -573,7 +576,7 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs,
 
 	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
 	if (!evt) {
-		vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
+		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
 		vs->vs_events_missed = true;
 		return NULL;
 	}
@@ -585,7 +588,7 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs,
 	return evt;
 }
 
-static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
+static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
 {
 	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
 
@@ -600,7 +603,7 @@ static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
 }
 
 static void
-tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
+vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
 {
 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 	struct virtio_scsi_event *event = &evt->event;
@@ -646,24 +649,24 @@ again:
646 if (!ret) 649 if (!ret)
647 vhost_add_used_and_signal(&vs->dev, vq, head, 0); 650 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
648 else 651 else
649 vq_err(vq, "Faulted on tcm_vhost_send_event\n"); 652 vq_err(vq, "Faulted on vhost_scsi_send_event\n");
650} 653}
651 654
652static void tcm_vhost_evt_work(struct vhost_work *work) 655static void vhost_scsi_evt_work(struct vhost_work *work)
653{ 656{
654 struct vhost_scsi *vs = container_of(work, struct vhost_scsi, 657 struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
655 vs_event_work); 658 vs_event_work);
656 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; 659 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
657 struct tcm_vhost_evt *evt; 660 struct vhost_scsi_evt *evt;
658 struct llist_node *llnode; 661 struct llist_node *llnode;
659 662
660 mutex_lock(&vq->mutex); 663 mutex_lock(&vq->mutex);
661 llnode = llist_del_all(&vs->vs_event_list); 664 llnode = llist_del_all(&vs->vs_event_list);
662 while (llnode) { 665 while (llnode) {
663 evt = llist_entry(llnode, struct tcm_vhost_evt, list); 666 evt = llist_entry(llnode, struct vhost_scsi_evt, list);
664 llnode = llist_next(llnode); 667 llnode = llist_next(llnode);
665 tcm_vhost_do_evt_work(vs, evt); 668 vhost_scsi_do_evt_work(vs, evt);
666 tcm_vhost_free_evt(vs, evt); 669 vhost_scsi_free_evt(vs, evt);
667 } 670 }
668 mutex_unlock(&vq->mutex); 671 mutex_unlock(&vq->mutex);
669} 672}
@@ -679,15 +682,16 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
679 vs_completion_work); 682 vs_completion_work);
680 DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ); 683 DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
681 struct virtio_scsi_cmd_resp v_rsp; 684 struct virtio_scsi_cmd_resp v_rsp;
682 struct tcm_vhost_cmd *cmd; 685 struct vhost_scsi_cmd *cmd;
683 struct llist_node *llnode; 686 struct llist_node *llnode;
684 struct se_cmd *se_cmd; 687 struct se_cmd *se_cmd;
688 struct iov_iter iov_iter;
685 int ret, vq; 689 int ret, vq;
686 690
687 bitmap_zero(signal, VHOST_SCSI_MAX_VQ); 691 bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
688 llnode = llist_del_all(&vs->vs_completion_list); 692 llnode = llist_del_all(&vs->vs_completion_list);
689 while (llnode) { 693 while (llnode) {
690 cmd = llist_entry(llnode, struct tcm_vhost_cmd, 694 cmd = llist_entry(llnode, struct vhost_scsi_cmd,
691 tvc_completion_list); 695 tvc_completion_list);
692 llnode = llist_next(llnode); 696 llnode = llist_next(llnode);
693 se_cmd = &cmd->tvc_se_cmd; 697 se_cmd = &cmd->tvc_se_cmd;
@@ -703,8 +707,11 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
703 se_cmd->scsi_sense_length); 707 se_cmd->scsi_sense_length);
704 memcpy(v_rsp.sense, cmd->tvc_sense_buf, 708 memcpy(v_rsp.sense, cmd->tvc_sense_buf,
705 se_cmd->scsi_sense_length); 709 se_cmd->scsi_sense_length);
706 ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp)); 710
707 if (likely(ret == 0)) { 711 iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
712 cmd->tvc_in_iovs, sizeof(v_rsp));
713 ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
714 if (likely(ret == sizeof(v_rsp))) {
708 struct vhost_scsi_virtqueue *q; 715 struct vhost_scsi_virtqueue *q;
709 vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0); 716 vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
710 q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq); 717 q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
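
[note on the hunk above] The copy_to_user() call being replaced assumed the response lived in exactly one iovec; with an ANY_LAYOUT guest the virtio_scsi_cmd_resp may be split across several in-iovecs, which is what the iov_iter absorbs. A minimal sketch of the new idiom, using the same iov_iter calls the patch introduces (send_resp is an illustrative name):

    static int send_resp(struct virtio_scsi_cmd_resp *rsp,
                         struct iovec *resp_iov, unsigned int in_iovs)
    {
            struct iov_iter iter;

            /* READ: the guest reads these bytes; iter may span iovecs. */
            iov_iter_init(&iter, READ, resp_iov, in_iovs, sizeof(*rsp));

            /*
             * copy_to_iter() returns the number of bytes copied, hence
             * the "== sizeof(v_rsp)" success test above instead of the
             * "== 0" test that copy_to_user() used.
             */
            if (copy_to_iter(rsp, sizeof(*rsp), &iter) != sizeof(*rsp))
                    return -EFAULT;
            return 0;
    }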
@@ -722,13 +729,13 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
722 vhost_signal(&vs->dev, &vs->vqs[vq].vq); 729 vhost_signal(&vs->dev, &vs->vqs[vq].vq);
723} 730}
724 731
725static struct tcm_vhost_cmd * 732static struct vhost_scsi_cmd *
726vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg, 733vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
727 unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr, 734 unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
728 u32 exp_data_len, int data_direction) 735 u32 exp_data_len, int data_direction)
729{ 736{
730 struct tcm_vhost_cmd *cmd; 737 struct vhost_scsi_cmd *cmd;
731 struct tcm_vhost_nexus *tv_nexus; 738 struct vhost_scsi_nexus *tv_nexus;
732 struct se_session *se_sess; 739 struct se_session *se_sess;
733 struct scatterlist *sg, *prot_sg; 740 struct scatterlist *sg, *prot_sg;
734 struct page **pages; 741 struct page **pages;
@@ -736,22 +743,22 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
736 743
737 tv_nexus = tpg->tpg_nexus; 744 tv_nexus = tpg->tpg_nexus;
738 if (!tv_nexus) { 745 if (!tv_nexus) {
739 pr_err("Unable to locate active struct tcm_vhost_nexus\n"); 746 pr_err("Unable to locate active struct vhost_scsi_nexus\n");
740 return ERR_PTR(-EIO); 747 return ERR_PTR(-EIO);
741 } 748 }
742 se_sess = tv_nexus->tvn_se_sess; 749 se_sess = tv_nexus->tvn_se_sess;
743 750
744 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); 751 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
745 if (tag < 0) { 752 if (tag < 0) {
746 pr_err("Unable to obtain tag for tcm_vhost_cmd\n"); 753 pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
747 return ERR_PTR(-ENOMEM); 754 return ERR_PTR(-ENOMEM);
748 } 755 }
749 756
750 cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag]; 757 cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
751 sg = cmd->tvc_sgl; 758 sg = cmd->tvc_sgl;
752 prot_sg = cmd->tvc_prot_sgl; 759 prot_sg = cmd->tvc_prot_sgl;
753 pages = cmd->tvc_upages; 760 pages = cmd->tvc_upages;
754 memset(cmd, 0, sizeof(struct tcm_vhost_cmd)); 761 memset(cmd, 0, sizeof(struct vhost_scsi_cmd));
755 762
756 cmd->tvc_sgl = sg; 763 cmd->tvc_sgl = sg;
757 cmd->tvc_prot_sgl = prot_sg; 764 cmd->tvc_prot_sgl = prot_sg;
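
[note on the hunk above] vhost_scsi_get_tag() recycles fixed descriptors out of the session tag pool, so the memset() must not clobber the scatterlist and page arrays allocated once per tag at nexus creation; hence the save/clear/restore of tvc_sgl, tvc_prot_sgl and tvc_upages. Roughly, the lifecycle is (release side sketched from vhost_scsi_release_cmd(), which is outside this hunk):

    /* Acquire: tag indexes a preallocated descriptor in sess_cmd_map. */
    tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
    cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];

    sg = cmd->tvc_sgl;              /* stash the long-lived arrays ... */
    prot_sg = cmd->tvc_prot_sgl;
    pages = cmd->tvc_upages;
    memset(cmd, 0, sizeof(*cmd));   /* ... wipe per-I/O state ...      */
    cmd->tvc_sgl = sg;              /* ... and re-attach them          */
    cmd->tvc_prot_sgl = prot_sg;
    cmd->tvc_upages = pages;

    /* Release: hand the tag back (map_tag records the pool index). */
    percpu_ida_free(&se_sess->sess_tag_pool, cmd->tvc_se_cmd.map_tag);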
@@ -763,9 +770,9 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
763 cmd->tvc_exp_data_len = exp_data_len; 770 cmd->tvc_exp_data_len = exp_data_len;
764 cmd->tvc_data_direction = data_direction; 771 cmd->tvc_data_direction = data_direction;
765 cmd->tvc_nexus = tv_nexus; 772 cmd->tvc_nexus = tv_nexus;
766 cmd->inflight = tcm_vhost_get_inflight(vq); 773 cmd->inflight = vhost_scsi_get_inflight(vq);
767 774
768 memcpy(cmd->tvc_cdb, cdb, TCM_VHOST_MAX_CDB_SIZE); 775 memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
769 776
770 return cmd; 777 return cmd;
771} 778}
@@ -776,29 +783,22 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
776 * Returns the number of scatterlist entries used or -errno on error. 783 * Returns the number of scatterlist entries used or -errno on error.
777 */ 784 */
778static int 785static int
779vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd, 786vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
787 void __user *ptr,
788 size_t len,
780 struct scatterlist *sgl, 789 struct scatterlist *sgl,
781 unsigned int sgl_count,
782 struct iovec *iov,
783 struct page **pages,
784 bool write) 790 bool write)
785{ 791{
786 unsigned int npages = 0, pages_nr, offset, nbytes; 792 unsigned int npages = 0, offset, nbytes;
793 unsigned int pages_nr = iov_num_pages(ptr, len);
787 struct scatterlist *sg = sgl; 794 struct scatterlist *sg = sgl;
788 void __user *ptr = iov->iov_base; 795 struct page **pages = cmd->tvc_upages;
789 size_t len = iov->iov_len;
790 int ret, i; 796 int ret, i;
791 797
792 pages_nr = iov_num_pages(iov); 798 if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
793 if (pages_nr > sgl_count) {
794 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
795 " sgl_count: %u\n", pages_nr, sgl_count);
796 return -ENOBUFS;
797 }
798 if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) {
799 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than" 799 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
800 " preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n", 800 " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
801 pages_nr, TCM_VHOST_PREALLOC_UPAGES); 801 pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
802 return -ENOBUFS; 802 return -ENOBUFS;
803 } 803 }
804 804
@@ -829,84 +829,94 @@ out:
829} 829}
830 830
831static int 831static int
832vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd, 832vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
833 struct iovec *iov,
834 int niov,
835 bool write)
836{ 833{
837 struct scatterlist *sg = cmd->tvc_sgl; 834 int sgl_count = 0;
838 unsigned int sgl_count = 0;
839 int ret, i;
840 835
841 for (i = 0; i < niov; i++) 836 if (!iter || !iter->iov) {
842 sgl_count += iov_num_pages(&iov[i]); 837 pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
838 " present\n", __func__, bytes);
839 return -EINVAL;
840 }
843 841
844 if (sgl_count > TCM_VHOST_PREALLOC_SGLS) { 842 sgl_count = iov_iter_npages(iter, 0xffff);
845 pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than" 843 if (sgl_count > max_sgls) {
846 " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n", 844 pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
847 sgl_count, TCM_VHOST_PREALLOC_SGLS); 845 " max_sgls: %d\n", __func__, sgl_count, max_sgls);
848 return -ENOBUFS; 846 return -EINVAL;
849 } 847 }
848 return sgl_count;
849}
850 850
851 pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count); 851static int
852 sg_init_table(sg, sgl_count); 852vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
853 cmd->tvc_sgl_count = sgl_count; 853 struct iov_iter *iter,
854 struct scatterlist *sg, int sg_count)
855{
856 size_t off = iter->iov_offset;
857 int i, ret;
854 858
855 pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count); 859 for (i = 0; i < iter->nr_segs; i++) {
860 void __user *base = iter->iov[i].iov_base + off;
861 size_t len = iter->iov[i].iov_len - off;
856 862
857 for (i = 0; i < niov; i++) { 863 ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
858 ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
859 cmd->tvc_upages, write);
860 if (ret < 0) { 864 if (ret < 0) {
861 for (i = 0; i < cmd->tvc_sgl_count; i++) 865 for (i = 0; i < sg_count; i++) {
862 put_page(sg_page(&cmd->tvc_sgl[i])); 866 struct page *page = sg_page(&sg[i]);
863 867 if (page)
864 cmd->tvc_sgl_count = 0; 868 put_page(page);
869 }
865 return ret; 870 return ret;
866 } 871 }
867 sg += ret; 872 sg += ret;
868 sgl_count -= ret; 873 off = 0;
869 } 874 }
870 return 0; 875 return 0;
871} 876}
872 877
873static int 878static int
874vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd, 879vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
875 struct iovec *iov, 880 size_t prot_bytes, struct iov_iter *prot_iter,
876 int niov, 881 size_t data_bytes, struct iov_iter *data_iter)
877 bool write) 882{
878{ 883 int sgl_count, ret;
879 struct scatterlist *prot_sg = cmd->tvc_prot_sgl; 884 bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
880 unsigned int prot_sgl_count = 0; 885
881 int ret, i; 886 if (prot_bytes) {
882 887 sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
883 for (i = 0; i < niov; i++) 888 VHOST_SCSI_PREALLOC_PROT_SGLS);
884 prot_sgl_count += iov_num_pages(&iov[i]); 889 if (sgl_count < 0)
885 890 return sgl_count;
886 if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) { 891
887 pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than" 892 sg_init_table(cmd->tvc_prot_sgl, sgl_count);
888 " preallocated TCM_VHOST_PREALLOC_PROT_SGLS: %u\n", 893 cmd->tvc_prot_sgl_count = sgl_count;
889 prot_sgl_count, TCM_VHOST_PREALLOC_PROT_SGLS); 894 pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
890 return -ENOBUFS; 895 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
891 } 896
892 897 ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
893 pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__, 898 cmd->tvc_prot_sgl,
894 prot_sg, prot_sgl_count); 899 cmd->tvc_prot_sgl_count);
895 sg_init_table(prot_sg, prot_sgl_count);
896 cmd->tvc_prot_sgl_count = prot_sgl_count;
897
898 for (i = 0; i < niov; i++) {
899 ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i],
900 cmd->tvc_upages, write);
901 if (ret < 0) { 900 if (ret < 0) {
902 for (i = 0; i < cmd->tvc_prot_sgl_count; i++)
903 put_page(sg_page(&cmd->tvc_prot_sgl[i]));
904
905 cmd->tvc_prot_sgl_count = 0; 901 cmd->tvc_prot_sgl_count = 0;
906 return ret; 902 return ret;
907 } 903 }
908 prot_sg += ret; 904 }
909 prot_sgl_count -= ret; 905 sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
906 VHOST_SCSI_PREALLOC_SGLS);
907 if (sgl_count < 0)
908 return sgl_count;
909
910 sg_init_table(cmd->tvc_sgl, sgl_count);
911 cmd->tvc_sgl_count = sgl_count;
912 pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
913 cmd->tvc_sgl, cmd->tvc_sgl_count);
914
915 ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
916 cmd->tvc_sgl, cmd->tvc_sgl_count);
917 if (ret < 0) {
918 cmd->tvc_sgl_count = 0;
919 return ret;
910 } 920 }
911 return 0; 921 return 0;
912} 922}
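
[note on the hunk above] The refactoring splits mapping into a sizing pass and a pinning pass: vhost_scsi_calc_sgls() bounds the scatterlist length with iov_iter_npages() against the preallocated maximum, then vhost_scsi_iov_to_sgl() walks the iterator's segments. One subtlety worth calling out: iter->iov_offset is non-zero for the first segment whenever copy_from_iter() already consumed the request header out of the same iovec, so only segment 0 starts at an offset. A condensed, annotated view of the walk:

    size_t off = iter->iov_offset;  /* header bytes already consumed */
    int i, ret;

    for (i = 0; i < iter->nr_segs; i++) {
            void __user *base = iter->iov[i].iov_base + off;
            size_t len = iter->iov[i].iov_len - off;

            ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
            if (ret < 0)
                    return ret;     /* the code above put_page()s any
                                       pages already pinned into sg[] */
            sg += ret;
            off = 0;                /* later segments start at byte 0 */
    }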
@@ -928,11 +938,11 @@ static int vhost_scsi_to_tcm_attr(int attr)
928 return TCM_SIMPLE_TAG; 938 return TCM_SIMPLE_TAG;
929} 939}
930 940
931static void tcm_vhost_submission_work(struct work_struct *work) 941static void vhost_scsi_submission_work(struct work_struct *work)
932{ 942{
933 struct tcm_vhost_cmd *cmd = 943 struct vhost_scsi_cmd *cmd =
934 container_of(work, struct tcm_vhost_cmd, work); 944 container_of(work, struct vhost_scsi_cmd, work);
935 struct tcm_vhost_nexus *tv_nexus; 945 struct vhost_scsi_nexus *tv_nexus;
936 struct se_cmd *se_cmd = &cmd->tvc_se_cmd; 946 struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
937 struct scatterlist *sg_ptr, *sg_prot_ptr = NULL; 947 struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
938 int rc; 948 int rc;
@@ -986,19 +996,20 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs,
986static void 996static void
987vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) 997vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
988{ 998{
989 struct tcm_vhost_tpg **vs_tpg; 999 struct vhost_scsi_tpg **vs_tpg, *tpg;
990 struct virtio_scsi_cmd_req v_req; 1000 struct virtio_scsi_cmd_req v_req;
991 struct virtio_scsi_cmd_req_pi v_req_pi; 1001 struct virtio_scsi_cmd_req_pi v_req_pi;
992 struct tcm_vhost_tpg *tpg; 1002 struct vhost_scsi_cmd *cmd;
993 struct tcm_vhost_cmd *cmd; 1003 struct iov_iter out_iter, in_iter, prot_iter, data_iter;
994 u64 tag; 1004 u64 tag;
995 u32 exp_data_len, data_first, data_num, data_direction, prot_first; 1005 u32 exp_data_len, data_direction;
996 unsigned out, in, i; 1006 unsigned out, in;
997 int head, ret, data_niov, prot_niov, prot_bytes; 1007 int head, ret, prot_bytes;
998 size_t req_size; 1008 size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
1009 size_t out_size, in_size;
999 u16 lun; 1010 u16 lun;
1000 u8 *target, *lunp, task_attr; 1011 u8 *target, *lunp, task_attr;
1001 bool hdr_pi; 1012 bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
1002 void *req, *cdb; 1013 void *req, *cdb;
1003 1014
1004 mutex_lock(&vq->mutex); 1015 mutex_lock(&vq->mutex);
@@ -1014,10 +1025,10 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1014 1025
1015 for (;;) { 1026 for (;;) {
1016 head = vhost_get_vq_desc(vq, vq->iov, 1027 head = vhost_get_vq_desc(vq, vq->iov,
1017 ARRAY_SIZE(vq->iov), &out, &in, 1028 ARRAY_SIZE(vq->iov), &out, &in,
1018 NULL, NULL); 1029 NULL, NULL);
1019 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n", 1030 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
1020 head, out, in); 1031 head, out, in);
1021 /* On error, stop handling until the next kick. */ 1032 /* On error, stop handling until the next kick. */
1022 if (unlikely(head < 0)) 1033 if (unlikely(head < 0))
1023 break; 1034 break;
@@ -1029,113 +1040,134 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1029 } 1040 }
1030 break; 1041 break;
1031 } 1042 }
1032
1033 /* FIXME: BIDI operation */
1034 if (out == 1 && in == 1) {
1035 data_direction = DMA_NONE;
1036 data_first = 0;
1037 data_num = 0;
1038 } else if (out == 1 && in > 1) {
1039 data_direction = DMA_FROM_DEVICE;
1040 data_first = out + 1;
1041 data_num = in - 1;
1042 } else if (out > 1 && in == 1) {
1043 data_direction = DMA_TO_DEVICE;
1044 data_first = 1;
1045 data_num = out - 1;
1046 } else {
1047 vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
1048 out, in);
1049 break;
1050 }
1051
1052 /* 1043 /*
1053 * Check for a sane resp buffer so we can report errors to 1044 * Check for a sane response buffer so we can report early
1054 * the guest. 1045 * errors back to the guest.
1055 */ 1046 */
1056 if (unlikely(vq->iov[out].iov_len != 1047 if (unlikely(vq->iov[out].iov_len < rsp_size)) {
1057 sizeof(struct virtio_scsi_cmd_resp))) { 1048 vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
1058 vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu" 1049 " size, got %zu bytes\n", vq->iov[out].iov_len);
1059 " bytes\n", vq->iov[out].iov_len);
1060 break; 1050 break;
1061 } 1051 }
1062 1052 /*
1063 if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI)) { 1053 * Setup pointers and values based upon different virtio-scsi
1054 * request header if T10_PI is enabled in KVM guest.
1055 */
1056 if (t10_pi) {
1064 req = &v_req_pi; 1057 req = &v_req_pi;
1058 req_size = sizeof(v_req_pi);
1065 lunp = &v_req_pi.lun[0]; 1059 lunp = &v_req_pi.lun[0];
1066 target = &v_req_pi.lun[1]; 1060 target = &v_req_pi.lun[1];
1067 req_size = sizeof(v_req_pi);
1068 hdr_pi = true;
1069 } else { 1061 } else {
1070 req = &v_req; 1062 req = &v_req;
1063 req_size = sizeof(v_req);
1071 lunp = &v_req.lun[0]; 1064 lunp = &v_req.lun[0];
1072 target = &v_req.lun[1]; 1065 target = &v_req.lun[1];
1073 req_size = sizeof(v_req);
1074 hdr_pi = false;
1075 } 1066 }
1067 /*
1068 * FIXME: Not correct for BIDI operation
1069 */
1070 out_size = iov_length(vq->iov, out);
1071 in_size = iov_length(&vq->iov[out], in);
1076 1072
1077 if (unlikely(vq->iov[0].iov_len < req_size)) { 1073 /*
1078 pr_err("Expecting virtio-scsi header: %zu, got %zu\n", 1074 * Copy over the virtio-scsi request header, which for a
1079 req_size, vq->iov[0].iov_len); 1075 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
1080 break; 1076 * single iovec may contain both the header + outgoing
1081 } 1077 * WRITE payloads.
1082 ret = copy_from_user(req, vq->iov[0].iov_base, req_size); 1078 *
1083 if (unlikely(ret)) { 1079 * copy_from_iter() will advance out_iter, so that it will
1084 vq_err(vq, "Faulted on virtio_scsi_cmd_req\n"); 1080 * point at the start of the outgoing WRITE payload, if
1085 break; 1081 * DMA_TO_DEVICE is set.
1086 } 1082 */
1083 iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
1087 1084
1085 ret = copy_from_iter(req, req_size, &out_iter);
1086 if (unlikely(ret != req_size)) {
1087 vq_err(vq, "Faulted on copy_from_iter\n");
1088 vhost_scsi_send_bad_target(vs, vq, head, out);
1089 continue;
1090 }
1088 /* virtio-scsi spec requires byte 0 of the lun to be 1 */ 1091 /* virtio-scsi spec requires byte 0 of the lun to be 1 */
1089 if (unlikely(*lunp != 1)) { 1092 if (unlikely(*lunp != 1)) {
1093 vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
1090 vhost_scsi_send_bad_target(vs, vq, head, out); 1094 vhost_scsi_send_bad_target(vs, vq, head, out);
1091 continue; 1095 continue;
1092 } 1096 }
1093 1097
1094 tpg = ACCESS_ONCE(vs_tpg[*target]); 1098 tpg = ACCESS_ONCE(vs_tpg[*target]);
1095
1096 /* Target does not exist, fail the request */
1097 if (unlikely(!tpg)) { 1099 if (unlikely(!tpg)) {
1100 /* Target does not exist, fail the request */
1098 vhost_scsi_send_bad_target(vs, vq, head, out); 1101 vhost_scsi_send_bad_target(vs, vq, head, out);
1099 continue; 1102 continue;
1100 } 1103 }
1101
1102 data_niov = data_num;
1103 prot_niov = prot_first = prot_bytes = 0;
1104 /* 1104 /*
1105 * Determine if any protection information iovecs are preceeding 1105 * Determine data_direction by calculating the total outgoing
1106 * the actual data payload, and adjust data_first + data_niov 1106 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
1107 * values accordingly for vhost_scsi_map_iov_to_sgl() below. 1107 * response headers respectively.
1108 * 1108 *
1109 * Also extract virtio_scsi header bits for vhost_scsi_get_tag() 1109 * For DMA_TO_DEVICE this is out_iter, which is already pointing
1110 * to the right place.
1111 *
1112 * For DMA_FROM_DEVICE, the iovec will be just past the end
1113 * of the virtio-scsi response header in either the same
1114 * or immediately following iovec.
1115 *
1116 * Any associated T10_PI bytes for the outgoing / incoming
1117 * payloads are included in calculation of exp_data_len here.
1110 */ 1118 */
1111 if (hdr_pi) { 1119 prot_bytes = 0;
1120
1121 if (out_size > req_size) {
1122 data_direction = DMA_TO_DEVICE;
1123 exp_data_len = out_size - req_size;
1124 data_iter = out_iter;
1125 } else if (in_size > rsp_size) {
1126 data_direction = DMA_FROM_DEVICE;
1127 exp_data_len = in_size - rsp_size;
1128
1129 iov_iter_init(&in_iter, READ, &vq->iov[out], in,
1130 rsp_size + exp_data_len);
1131 iov_iter_advance(&in_iter, rsp_size);
1132 data_iter = in_iter;
1133 } else {
1134 data_direction = DMA_NONE;
1135 exp_data_len = 0;
1136 }
1137 /*
1138 * If T10_PI header + payload is present, setup prot_iter values
1139 * and recalculate data_iter for vhost_scsi_mapal() mapping to
1140 * host scatterlists via get_user_pages_fast().
1141 */
1142 if (t10_pi) {
1112 if (v_req_pi.pi_bytesout) { 1143 if (v_req_pi.pi_bytesout) {
1113 if (data_direction != DMA_TO_DEVICE) { 1144 if (data_direction != DMA_TO_DEVICE) {
1114 vq_err(vq, "Received non zero do_pi_niov" 1145 vq_err(vq, "Received non zero pi_bytesout,"
1115 ", but wrong data_direction\n"); 1146 " but wrong data_direction\n");
1116 goto err_cmd; 1147 vhost_scsi_send_bad_target(vs, vq, head, out);
1148 continue;
1117 } 1149 }
1118 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout); 1150 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
1119 } else if (v_req_pi.pi_bytesin) { 1151 } else if (v_req_pi.pi_bytesin) {
1120 if (data_direction != DMA_FROM_DEVICE) { 1152 if (data_direction != DMA_FROM_DEVICE) {
1121 vq_err(vq, "Received non zero di_pi_niov" 1153 vq_err(vq, "Received non zero pi_bytesin,"
1122 ", but wrong data_direction\n"); 1154 " but wrong data_direction\n");
1123 goto err_cmd; 1155 vhost_scsi_send_bad_target(vs, vq, head, out);
1156 continue;
1124 } 1157 }
1125 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin); 1158 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
1126 } 1159 }
1160 /*
1161 * Set prot_iter to data_iter, and advance past any
 1162 * preceding prot_bytes that may be present.
1163 *
1164 * Also fix up the exp_data_len to reflect only the
1165 * actual data payload length.
1166 */
1127 if (prot_bytes) { 1167 if (prot_bytes) {
1128 int tmp = 0; 1168 exp_data_len -= prot_bytes;
1129 1169 prot_iter = data_iter;
1130 for (i = 0; i < data_num; i++) { 1170 iov_iter_advance(&data_iter, prot_bytes);
1131 tmp += vq->iov[data_first + i].iov_len;
1132 prot_niov++;
1133 if (tmp >= prot_bytes)
1134 break;
1135 }
1136 prot_first = data_first;
1137 data_first += prot_niov;
1138 data_niov = data_num - prot_niov;
1139 } 1171 }
1140 tag = vhost64_to_cpu(vq, v_req_pi.tag); 1172 tag = vhost64_to_cpu(vq, v_req_pi.tag);
1141 task_attr = v_req_pi.task_attr; 1173 task_attr = v_req_pi.task_attr;
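
[note on the hunk above] This is the core of the ANY_LAYOUT rework: direction is no longer guessed from the iovec counts (the deleted out/in heuristic at the top of this hunk) but computed from byte totals, since a conformant guest may pack header and payload into the same iovec. Everything past the request header going out is WRITE payload; everything past the response header coming in is READ payload; and any T10 PI bytes sit in front of the data and are peeled off by advancing the iterator. A condensed restatement, using the locals of the function above:

    size_t out_size = iov_length(vq->iov, out);       /* driver -> device */
    size_t in_size  = iov_length(&vq->iov[out], in);  /* device -> driver */

    if (out_size > req_size) {
            data_direction = DMA_TO_DEVICE;
            exp_data_len = out_size - req_size;     /* PI + data */
    } else if (in_size > rsp_size) {
            data_direction = DMA_FROM_DEVICE;
            exp_data_len = in_size - rsp_size;      /* PI + data */
    } else {
            data_direction = DMA_NONE;
            exp_data_len = 0;
    }

    if (prot_bytes) {                       /* PI precedes the data payload */
            exp_data_len -= prot_bytes;
            prot_iter = data_iter;          /* prot_iter points at the PI   */
            iov_iter_advance(&data_iter, prot_bytes); /* data_iter past it  */
    }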
@@ -1147,83 +1179,65 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1147 cdb = &v_req.cdb[0]; 1179 cdb = &v_req.cdb[0];
1148 lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; 1180 lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
1149 } 1181 }
1150 exp_data_len = 0;
1151 for (i = 0; i < data_niov; i++)
1152 exp_data_len += vq->iov[data_first + i].iov_len;
1153 /* 1182 /*
1154 * Check that the recieved CDB size does not exceeded our 1183 * Check that the received CDB size does not exceed our
1155 * hardcoded max for vhost-scsi 1184 * hardcoded max for vhost-scsi, then get a pre-allocated
1185 * cmd descriptor for the new virtio-scsi tag.
1156 * 1186 *
1157 * TODO what if cdb was too small for varlen cdb header? 1187 * TODO what if cdb was too small for varlen cdb header?
1158 */ 1188 */
1159 if (unlikely(scsi_command_size(cdb) > TCM_VHOST_MAX_CDB_SIZE)) { 1189 if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
1160 vq_err(vq, "Received SCSI CDB with command_size: %d that" 1190 vq_err(vq, "Received SCSI CDB with command_size: %d that"
1161 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1191 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1162 scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE); 1192 scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
1163 goto err_cmd; 1193 vhost_scsi_send_bad_target(vs, vq, head, out);
1194 continue;
1164 } 1195 }
1165
1166 cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr, 1196 cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
1167 exp_data_len + prot_bytes, 1197 exp_data_len + prot_bytes,
1168 data_direction); 1198 data_direction);
1169 if (IS_ERR(cmd)) { 1199 if (IS_ERR(cmd)) {
1170 vq_err(vq, "vhost_scsi_get_tag failed %ld\n", 1200 vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1171 PTR_ERR(cmd)); 1201 PTR_ERR(cmd));
1172 goto err_cmd; 1202 vhost_scsi_send_bad_target(vs, vq, head, out);
1203 continue;
1173 } 1204 }
1174
1175 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
1176 ": %d\n", cmd, exp_data_len, data_direction);
1177
1178 cmd->tvc_vhost = vs; 1205 cmd->tvc_vhost = vs;
1179 cmd->tvc_vq = vq; 1206 cmd->tvc_vq = vq;
1180 cmd->tvc_resp = vq->iov[out].iov_base; 1207 cmd->tvc_resp_iov = &vq->iov[out];
1208 cmd->tvc_in_iovs = in;
1181 1209
1182 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", 1210 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1183 cmd->tvc_cdb[0], cmd->tvc_lun); 1211 cmd->tvc_cdb[0], cmd->tvc_lun);
1212 pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
1213 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1184 1214
1185 if (prot_niov) {
1186 ret = vhost_scsi_map_iov_to_prot(cmd,
1187 &vq->iov[prot_first], prot_niov,
1188 data_direction == DMA_FROM_DEVICE);
1189 if (unlikely(ret)) {
1190 vq_err(vq, "Failed to map iov to"
1191 " prot_sgl\n");
1192 goto err_free;
1193 }
1194 }
1195 if (data_direction != DMA_NONE) { 1215 if (data_direction != DMA_NONE) {
1196 ret = vhost_scsi_map_iov_to_sgl(cmd, 1216 ret = vhost_scsi_mapal(cmd,
1197 &vq->iov[data_first], data_niov, 1217 prot_bytes, &prot_iter,
1198 data_direction == DMA_FROM_DEVICE); 1218 exp_data_len, &data_iter);
1199 if (unlikely(ret)) { 1219 if (unlikely(ret)) {
1200 vq_err(vq, "Failed to map iov to sgl\n"); 1220 vq_err(vq, "Failed to map iov to sgl\n");
1201 goto err_free; 1221 vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
1222 vhost_scsi_send_bad_target(vs, vq, head, out);
1223 continue;
1202 } 1224 }
1203 } 1225 }
1204 /* 1226 /*
1205 * Save the descriptor from vhost_get_vq_desc() to be used to 1227 * Save the descriptor from vhost_get_vq_desc() to be used to
1206 * complete the virtio-scsi request in TCM callback context via 1228 * complete the virtio-scsi request in TCM callback context via
1207 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status() 1229 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
1208 */ 1230 */
1209 cmd->tvc_vq_desc = head; 1231 cmd->tvc_vq_desc = head;
1210 /* 1232 /*
1211 * Dispatch tv_cmd descriptor for cmwq execution in process 1233 * Dispatch cmd descriptor for cmwq execution in process
1212 * context provided by tcm_vhost_workqueue. This also ensures 1234 * context provided by vhost_scsi_workqueue. This also ensures
1213 * tv_cmd is executed on the same kworker CPU as this vhost 1235 * cmd is executed on the same kworker CPU as this vhost
1214 * thread to gain positive L2 cache locality effects.. 1236 * thread to gain positive L2 cache locality effects.
1215 */ 1237 */
1216 INIT_WORK(&cmd->work, tcm_vhost_submission_work); 1238 INIT_WORK(&cmd->work, vhost_scsi_submission_work);
1217 queue_work(tcm_vhost_workqueue, &cmd->work); 1239 queue_work(vhost_scsi_workqueue, &cmd->work);
1218 } 1240 }
1219
1220 mutex_unlock(&vq->mutex);
1221 return;
1222
1223err_free:
1224 vhost_scsi_free_cmd(cmd);
1225err_cmd:
1226 vhost_scsi_send_bad_target(vs, vq, head, out);
1227out: 1241out:
1228 mutex_unlock(&vq->mutex); 1242 mutex_unlock(&vq->mutex);
1229} 1243}
@@ -1234,15 +1248,15 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1234} 1248}
1235 1249
1236static void 1250static void
1237tcm_vhost_send_evt(struct vhost_scsi *vs, 1251vhost_scsi_send_evt(struct vhost_scsi *vs,
1238 struct tcm_vhost_tpg *tpg, 1252 struct vhost_scsi_tpg *tpg,
1239 struct se_lun *lun, 1253 struct se_lun *lun,
1240 u32 event, 1254 u32 event,
1241 u32 reason) 1255 u32 reason)
1242{ 1256{
1243 struct tcm_vhost_evt *evt; 1257 struct vhost_scsi_evt *evt;
1244 1258
1245 evt = tcm_vhost_allocate_evt(vs, event, reason); 1259 evt = vhost_scsi_allocate_evt(vs, event, reason);
1246 if (!evt) 1260 if (!evt)
1247 return; 1261 return;
1248 1262
@@ -1253,7 +1267,7 @@ tcm_vhost_send_evt(struct vhost_scsi *vs,
1253 * lun[4-7] need to be zero according to virtio-scsi spec. 1267 * lun[4-7] need to be zero according to virtio-scsi spec.
1254 */ 1268 */
1255 evt->event.lun[0] = 0x01; 1269 evt->event.lun[0] = 0x01;
1256 evt->event.lun[1] = tpg->tport_tpgt & 0xFF; 1270 evt->event.lun[1] = tpg->tport_tpgt;
1257 if (lun->unpacked_lun >= 256) 1271 if (lun->unpacked_lun >= 256)
1258 evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ; 1272 evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
1259 evt->event.lun[3] = lun->unpacked_lun & 0xFF; 1273 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
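
[note on the hunk above] The event LUN follows the virtio-scsi addressing format: byte 0 is fixed at 0x01, byte 1 carries the target number (the companion change in vhost_scsi_make_tpg() now bounds tport_tpgt so it fits in a byte, which is why the old '& 0xFF' mask can go), and bytes 2-3 carry the LUN, with 0x40 OR'd into byte 2 only when the LUN is 256 or larger. A worked example with illustrative values tpgt = 5, unpacked_lun = 0x0123:

    u8 lun[8] = { 0 };              /* lun[4-7] must remain zero (spec) */

    lun[0] = 0x01;
    lun[1] = 5;                     /* tpg->tport_tpgt */
    lun[2] = (0x0123 >> 8) | 0x40;  /* = 0x41: flag + high byte */
    lun[3] = 0x0123 & 0xFF;         /* = 0x23: low byte */
    /* For unpacked_lun < 256, lun[2] simply stays 0. */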
@@ -1274,7 +1288,7 @@ static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1274 goto out; 1288 goto out;
1275 1289
1276 if (vs->vs_events_missed) 1290 if (vs->vs_events_missed)
1277 tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); 1291 vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1278out: 1292out:
1279 mutex_unlock(&vq->mutex); 1293 mutex_unlock(&vq->mutex);
1280} 1294}
@@ -1300,7 +1314,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
1300 int i; 1314 int i;
1301 1315
1302 /* Init new inflight and remember the old inflight */ 1316 /* Init new inflight and remember the old inflight */
1303 tcm_vhost_init_inflight(vs, old_inflight); 1317 vhost_scsi_init_inflight(vs, old_inflight);
1304 1318
1305 /* 1319 /*
1306 * The inflight->kref was initialized to 1. We decrement it here to 1320 * The inflight->kref was initialized to 1. We decrement it here to
@@ -1308,7 +1322,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
1308 * when all the reqs are finished. 1322 * when all the reqs are finished.
1309 */ 1323 */
1310 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) 1324 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1311 kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight); 1325 kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
1312 1326
1313 /* Flush both the vhost poll and vhost work */ 1327 /* Flush both the vhost poll and vhost work */
1314 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) 1328 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
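
[note on the hunk above] The inflight swap is a drain-by-refcount idiom: every command takes a reference on the inflight object that was current when it was issued; installing a fresh object and dropping the initial reference means the release callback runs only once the last straggler finishes, and the flush can then wait on it. Schematically (names are illustrative, not the driver's):

    struct inflight {
            struct kref kref;               /* kref_init()ed to 1 */
            struct completion comp;         /* init_completion()ed */
    };

    static void done_inflight(struct kref *kref)
    {
            struct inflight *inf = container_of(kref, struct inflight, kref);

            complete(&inf->comp);           /* last reference just dropped */
    }

    /* Issue path:    kref_get(&cur->kref);
     * Complete path: kref_put(&cur->kref, done_inflight);
     * Flush path: */
    static void drain(struct inflight *old)
    {
            kref_put(&old->kref, done_inflight);    /* drop the initial ref */
            wait_for_completion(&old->comp);        /* all inflight cmds done */
    }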
@@ -1323,24 +1337,24 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
1323 1337
1324/* 1338/*
1325 * Called from vhost_scsi_ioctl() context to walk the list of available 1339 * Called from vhost_scsi_ioctl() context to walk the list of available
1326 * tcm_vhost_tpg with an active struct tcm_vhost_nexus 1340 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1327 * 1341 *
1328 * The lock nesting rule is: 1342 * The lock nesting rule is:
1329 * tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex 1343 * vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1330 */ 1344 */
1331static int 1345static int
1332vhost_scsi_set_endpoint(struct vhost_scsi *vs, 1346vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1333 struct vhost_scsi_target *t) 1347 struct vhost_scsi_target *t)
1334{ 1348{
1335 struct se_portal_group *se_tpg; 1349 struct se_portal_group *se_tpg;
1336 struct tcm_vhost_tport *tv_tport; 1350 struct vhost_scsi_tport *tv_tport;
1337 struct tcm_vhost_tpg *tpg; 1351 struct vhost_scsi_tpg *tpg;
1338 struct tcm_vhost_tpg **vs_tpg; 1352 struct vhost_scsi_tpg **vs_tpg;
1339 struct vhost_virtqueue *vq; 1353 struct vhost_virtqueue *vq;
1340 int index, ret, i, len; 1354 int index, ret, i, len;
1341 bool match = false; 1355 bool match = false;
1342 1356
1343 mutex_lock(&tcm_vhost_mutex); 1357 mutex_lock(&vhost_scsi_mutex);
1344 mutex_lock(&vs->dev.mutex); 1358 mutex_lock(&vs->dev.mutex);
1345 1359
1346 /* Verify that ring has been setup correctly. */ 1360 /* Verify that ring has been setup correctly. */
@@ -1361,7 +1375,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1361 if (vs->vs_tpg) 1375 if (vs->vs_tpg)
1362 memcpy(vs_tpg, vs->vs_tpg, len); 1376 memcpy(vs_tpg, vs->vs_tpg, len);
1363 1377
1364 list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) { 1378 list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1365 mutex_lock(&tpg->tv_tpg_mutex); 1379 mutex_lock(&tpg->tv_tpg_mutex);
1366 if (!tpg->tpg_nexus) { 1380 if (!tpg->tpg_nexus) {
1367 mutex_unlock(&tpg->tv_tpg_mutex); 1381 mutex_unlock(&tpg->tv_tpg_mutex);
@@ -1429,7 +1443,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1429 1443
1430out: 1444out:
1431 mutex_unlock(&vs->dev.mutex); 1445 mutex_unlock(&vs->dev.mutex);
1432 mutex_unlock(&tcm_vhost_mutex); 1446 mutex_unlock(&vhost_scsi_mutex);
1433 return ret; 1447 return ret;
1434} 1448}
1435 1449
@@ -1438,14 +1452,14 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1438 struct vhost_scsi_target *t) 1452 struct vhost_scsi_target *t)
1439{ 1453{
1440 struct se_portal_group *se_tpg; 1454 struct se_portal_group *se_tpg;
1441 struct tcm_vhost_tport *tv_tport; 1455 struct vhost_scsi_tport *tv_tport;
1442 struct tcm_vhost_tpg *tpg; 1456 struct vhost_scsi_tpg *tpg;
1443 struct vhost_virtqueue *vq; 1457 struct vhost_virtqueue *vq;
1444 bool match = false; 1458 bool match = false;
1445 int index, ret, i; 1459 int index, ret, i;
1446 u8 target; 1460 u8 target;
1447 1461
1448 mutex_lock(&tcm_vhost_mutex); 1462 mutex_lock(&vhost_scsi_mutex);
1449 mutex_lock(&vs->dev.mutex); 1463 mutex_lock(&vs->dev.mutex);
1450 /* Verify that ring has been setup correctly. */ 1464 /* Verify that ring has been setup correctly. */
1451 for (index = 0; index < vs->dev.nvqs; ++index) { 1465 for (index = 0; index < vs->dev.nvqs; ++index) {
@@ -1511,14 +1525,14 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1511 vs->vs_tpg = NULL; 1525 vs->vs_tpg = NULL;
1512 WARN_ON(vs->vs_events_nr); 1526 WARN_ON(vs->vs_events_nr);
1513 mutex_unlock(&vs->dev.mutex); 1527 mutex_unlock(&vs->dev.mutex);
1514 mutex_unlock(&tcm_vhost_mutex); 1528 mutex_unlock(&vhost_scsi_mutex);
1515 return 0; 1529 return 0;
1516 1530
1517err_tpg: 1531err_tpg:
1518 mutex_unlock(&tpg->tv_tpg_mutex); 1532 mutex_unlock(&tpg->tv_tpg_mutex);
1519err_dev: 1533err_dev:
1520 mutex_unlock(&vs->dev.mutex); 1534 mutex_unlock(&vs->dev.mutex);
1521 mutex_unlock(&tcm_vhost_mutex); 1535 mutex_unlock(&vhost_scsi_mutex);
1522 return ret; 1536 return ret;
1523} 1537}
1524 1538
@@ -1565,7 +1579,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
1565 goto err_vqs; 1579 goto err_vqs;
1566 1580
1567 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work); 1581 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1568 vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work); 1582 vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1569 1583
1570 vs->vs_events_nr = 0; 1584 vs->vs_events_nr = 0;
1571 vs->vs_events_missed = false; 1585 vs->vs_events_missed = false;
@@ -1580,7 +1594,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
1580 } 1594 }
1581 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ); 1595 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1582 1596
1583 tcm_vhost_init_inflight(vs, NULL); 1597 vhost_scsi_init_inflight(vs, NULL);
1584 1598
1585 f->private_data = vs; 1599 f->private_data = vs;
1586 return 0; 1600 return 0;
@@ -1712,7 +1726,7 @@ static int vhost_scsi_deregister(void)
1712 return misc_deregister(&vhost_scsi_misc); 1726 return misc_deregister(&vhost_scsi_misc);
1713} 1727}
1714 1728
1715static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport) 1729static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1716{ 1730{
1717 switch (tport->tport_proto_id) { 1731 switch (tport->tport_proto_id) {
1718 case SCSI_PROTOCOL_SAS: 1732 case SCSI_PROTOCOL_SAS:
@@ -1729,7 +1743,7 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
1729} 1743}
1730 1744
1731static void 1745static void
1732tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg, 1746vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1733 struct se_lun *lun, bool plug) 1747 struct se_lun *lun, bool plug)
1734{ 1748{
1735 1749
@@ -1750,71 +1764,71 @@ tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
1750 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; 1764 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1751 mutex_lock(&vq->mutex); 1765 mutex_lock(&vq->mutex);
1752 if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG)) 1766 if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1753 tcm_vhost_send_evt(vs, tpg, lun, 1767 vhost_scsi_send_evt(vs, tpg, lun,
1754 VIRTIO_SCSI_T_TRANSPORT_RESET, reason); 1768 VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1755 mutex_unlock(&vq->mutex); 1769 mutex_unlock(&vq->mutex);
1756 mutex_unlock(&vs->dev.mutex); 1770 mutex_unlock(&vs->dev.mutex);
1757} 1771}
1758 1772
1759static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun) 1773static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1760{ 1774{
1761 tcm_vhost_do_plug(tpg, lun, true); 1775 vhost_scsi_do_plug(tpg, lun, true);
1762} 1776}
1763 1777
1764static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun) 1778static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1765{ 1779{
1766 tcm_vhost_do_plug(tpg, lun, false); 1780 vhost_scsi_do_plug(tpg, lun, false);
1767} 1781}
1768 1782
1769static int tcm_vhost_port_link(struct se_portal_group *se_tpg, 1783static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
1770 struct se_lun *lun) 1784 struct se_lun *lun)
1771{ 1785{
1772 struct tcm_vhost_tpg *tpg = container_of(se_tpg, 1786 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1773 struct tcm_vhost_tpg, se_tpg); 1787 struct vhost_scsi_tpg, se_tpg);
1774 1788
1775 mutex_lock(&tcm_vhost_mutex); 1789 mutex_lock(&vhost_scsi_mutex);
1776 1790
1777 mutex_lock(&tpg->tv_tpg_mutex); 1791 mutex_lock(&tpg->tv_tpg_mutex);
1778 tpg->tv_tpg_port_count++; 1792 tpg->tv_tpg_port_count++;
1779 mutex_unlock(&tpg->tv_tpg_mutex); 1793 mutex_unlock(&tpg->tv_tpg_mutex);
1780 1794
1781 tcm_vhost_hotplug(tpg, lun); 1795 vhost_scsi_hotplug(tpg, lun);
1782 1796
1783 mutex_unlock(&tcm_vhost_mutex); 1797 mutex_unlock(&vhost_scsi_mutex);
1784 1798
1785 return 0; 1799 return 0;
1786} 1800}
1787 1801
1788static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg, 1802static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
1789 struct se_lun *lun) 1803 struct se_lun *lun)
1790{ 1804{
1791 struct tcm_vhost_tpg *tpg = container_of(se_tpg, 1805 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1792 struct tcm_vhost_tpg, se_tpg); 1806 struct vhost_scsi_tpg, se_tpg);
1793 1807
1794 mutex_lock(&tcm_vhost_mutex); 1808 mutex_lock(&vhost_scsi_mutex);
1795 1809
1796 mutex_lock(&tpg->tv_tpg_mutex); 1810 mutex_lock(&tpg->tv_tpg_mutex);
1797 tpg->tv_tpg_port_count--; 1811 tpg->tv_tpg_port_count--;
1798 mutex_unlock(&tpg->tv_tpg_mutex); 1812 mutex_unlock(&tpg->tv_tpg_mutex);
1799 1813
1800 tcm_vhost_hotunplug(tpg, lun); 1814 vhost_scsi_hotunplug(tpg, lun);
1801 1815
1802 mutex_unlock(&tcm_vhost_mutex); 1816 mutex_unlock(&vhost_scsi_mutex);
1803} 1817}
1804 1818
1805static struct se_node_acl * 1819static struct se_node_acl *
1806tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg, 1820vhost_scsi_make_nodeacl(struct se_portal_group *se_tpg,
1807 struct config_group *group, 1821 struct config_group *group,
1808 const char *name) 1822 const char *name)
1809{ 1823{
1810 struct se_node_acl *se_nacl, *se_nacl_new; 1824 struct se_node_acl *se_nacl, *se_nacl_new;
1811 struct tcm_vhost_nacl *nacl; 1825 struct vhost_scsi_nacl *nacl;
1812 u64 wwpn = 0; 1826 u64 wwpn = 0;
1813 u32 nexus_depth; 1827 u32 nexus_depth;
1814 1828
1815 /* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0) 1829 /* vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
1816 return ERR_PTR(-EINVAL); */ 1830 return ERR_PTR(-EINVAL); */
1817 se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg); 1831 se_nacl_new = vhost_scsi_alloc_fabric_acl(se_tpg);
1818 if (!se_nacl_new) 1832 if (!se_nacl_new)
1819 return ERR_PTR(-ENOMEM); 1833 return ERR_PTR(-ENOMEM);
1820 1834
@@ -1826,37 +1840,37 @@ tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
1826 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, 1840 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1827 name, nexus_depth); 1841 name, nexus_depth);
1828 if (IS_ERR(se_nacl)) { 1842 if (IS_ERR(se_nacl)) {
1829 tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new); 1843 vhost_scsi_release_fabric_acl(se_tpg, se_nacl_new);
1830 return se_nacl; 1844 return se_nacl;
1831 } 1845 }
1832 /* 1846 /*
1833 * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN 1847 * Locate our struct vhost_scsi_nacl and set the FC Nport WWPN
1834 */ 1848 */
1835 nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl); 1849 nacl = container_of(se_nacl, struct vhost_scsi_nacl, se_node_acl);
1836 nacl->iport_wwpn = wwpn; 1850 nacl->iport_wwpn = wwpn;
1837 1851
1838 return se_nacl; 1852 return se_nacl;
1839} 1853}
1840 1854
1841static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl) 1855static void vhost_scsi_drop_nodeacl(struct se_node_acl *se_acl)
1842{ 1856{
1843 struct tcm_vhost_nacl *nacl = container_of(se_acl, 1857 struct vhost_scsi_nacl *nacl = container_of(se_acl,
1844 struct tcm_vhost_nacl, se_node_acl); 1858 struct vhost_scsi_nacl, se_node_acl);
1845 core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1); 1859 core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1846 kfree(nacl); 1860 kfree(nacl);
1847} 1861}
1848 1862
1849static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus, 1863static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
1850 struct se_session *se_sess) 1864 struct se_session *se_sess)
1851{ 1865{
1852 struct tcm_vhost_cmd *tv_cmd; 1866 struct vhost_scsi_cmd *tv_cmd;
1853 unsigned int i; 1867 unsigned int i;
1854 1868
1855 if (!se_sess->sess_cmd_map) 1869 if (!se_sess->sess_cmd_map)
1856 return; 1870 return;
1857 1871
1858 for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) { 1872 for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1859 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i]; 1873 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1860 1874
1861 kfree(tv_cmd->tvc_sgl); 1875 kfree(tv_cmd->tvc_sgl);
1862 kfree(tv_cmd->tvc_prot_sgl); 1876 kfree(tv_cmd->tvc_prot_sgl);
@@ -1864,13 +1878,13 @@ static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
1864 } 1878 }
1865} 1879}
1866 1880
1867static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, 1881static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
1868 const char *name) 1882 const char *name)
1869{ 1883{
1870 struct se_portal_group *se_tpg; 1884 struct se_portal_group *se_tpg;
1871 struct se_session *se_sess; 1885 struct se_session *se_sess;
1872 struct tcm_vhost_nexus *tv_nexus; 1886 struct vhost_scsi_nexus *tv_nexus;
1873 struct tcm_vhost_cmd *tv_cmd; 1887 struct vhost_scsi_cmd *tv_cmd;
1874 unsigned int i; 1888 unsigned int i;
1875 1889
1876 mutex_lock(&tpg->tv_tpg_mutex); 1890 mutex_lock(&tpg->tv_tpg_mutex);
@@ -1881,19 +1895,19 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1881 } 1895 }
1882 se_tpg = &tpg->se_tpg; 1896 se_tpg = &tpg->se_tpg;
1883 1897
1884 tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL); 1898 tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
1885 if (!tv_nexus) { 1899 if (!tv_nexus) {
1886 mutex_unlock(&tpg->tv_tpg_mutex); 1900 mutex_unlock(&tpg->tv_tpg_mutex);
1887 pr_err("Unable to allocate struct tcm_vhost_nexus\n"); 1901 pr_err("Unable to allocate struct vhost_scsi_nexus\n");
1888 return -ENOMEM; 1902 return -ENOMEM;
1889 } 1903 }
1890 /* 1904 /*
1891 * Initialize the struct se_session pointer and setup tagpool 1905 * Initialize the struct se_session pointer and setup tagpool
1892 * for struct tcm_vhost_cmd descriptors 1906 * for struct vhost_scsi_cmd descriptors
1893 */ 1907 */
1894 tv_nexus->tvn_se_sess = transport_init_session_tags( 1908 tv_nexus->tvn_se_sess = transport_init_session_tags(
1895 TCM_VHOST_DEFAULT_TAGS, 1909 VHOST_SCSI_DEFAULT_TAGS,
1896 sizeof(struct tcm_vhost_cmd), 1910 sizeof(struct vhost_scsi_cmd),
1897 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS); 1911 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
1898 if (IS_ERR(tv_nexus->tvn_se_sess)) { 1912 if (IS_ERR(tv_nexus->tvn_se_sess)) {
1899 mutex_unlock(&tpg->tv_tpg_mutex); 1913 mutex_unlock(&tpg->tv_tpg_mutex);
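
[note on the hunk above] transport_init_session_tags() allocates the whole per-session command map up front, and the loop that follows hangs three long-lived arrays off every descriptor. That makes the per-nexus footprint easy to estimate; the constants below are placeholders standing in for the VHOST_SCSI_* values defined near the top of this file, so check the actual definitions before quoting numbers:

    /* Back-of-the-envelope sizing; all four constants are assumed. */
    #define TAGS        256     /* VHOST_SCSI_DEFAULT_TAGS? */
    #define SGLS        2048    /* VHOST_SCSI_PREALLOC_SGLS? */
    #define UPAGES      2048    /* VHOST_SCSI_PREALLOC_UPAGES? */
    #define PROT_SGLS   512     /* VHOST_SCSI_PREALLOC_PROT_SGLS? */

    size_t per_cmd = sizeof(struct vhost_scsi_cmd)
                   + SGLS * sizeof(struct scatterlist)       /* tvc_sgl */
                   + UPAGES * sizeof(struct page *)          /* tvc_upages */
                   + PROT_SGLS * sizeof(struct scatterlist); /* tvc_prot_sgl */
    size_t total = TAGS * per_cmd;  /* tens of MB at these values */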
@@ -1901,11 +1915,11 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1901 return -ENOMEM; 1915 return -ENOMEM;
1902 } 1916 }
1903 se_sess = tv_nexus->tvn_se_sess; 1917 se_sess = tv_nexus->tvn_se_sess;
1904 for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) { 1918 for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1905 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i]; 1919 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1906 1920
1907 tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) * 1921 tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
1908 TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL); 1922 VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
1909 if (!tv_cmd->tvc_sgl) { 1923 if (!tv_cmd->tvc_sgl) {
1910 mutex_unlock(&tpg->tv_tpg_mutex); 1924 mutex_unlock(&tpg->tv_tpg_mutex);
1911 pr_err("Unable to allocate tv_cmd->tvc_sgl\n"); 1925 pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
@@ -1913,7 +1927,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1913 } 1927 }
1914 1928
1915 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) * 1929 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
1916 TCM_VHOST_PREALLOC_UPAGES, GFP_KERNEL); 1930 VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
1917 if (!tv_cmd->tvc_upages) { 1931 if (!tv_cmd->tvc_upages) {
1918 mutex_unlock(&tpg->tv_tpg_mutex); 1932 mutex_unlock(&tpg->tv_tpg_mutex);
1919 pr_err("Unable to allocate tv_cmd->tvc_upages\n"); 1933 pr_err("Unable to allocate tv_cmd->tvc_upages\n");
@@ -1921,7 +1935,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1921 } 1935 }
1922 1936
1923 tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) * 1937 tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
1924 TCM_VHOST_PREALLOC_PROT_SGLS, GFP_KERNEL); 1938 VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
1925 if (!tv_cmd->tvc_prot_sgl) { 1939 if (!tv_cmd->tvc_prot_sgl) {
1926 mutex_unlock(&tpg->tv_tpg_mutex); 1940 mutex_unlock(&tpg->tv_tpg_mutex);
1927 pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n"); 1941 pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
@@ -1930,7 +1944,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1930 } 1944 }
1931 /* 1945 /*
1932 * Since we are running in 'demo mode' this call will generate a 1946 * Since we are running in 'demo mode' this call will generate a
1933 * struct se_node_acl for the tcm_vhost struct se_portal_group with 1947 * struct se_node_acl for the vhost_scsi struct se_portal_group with
1934 * the SCSI Initiator port name of the passed configfs group 'name'. 1948 * the SCSI Initiator port name of the passed configfs group 'name'.
1935 */ 1949 */
1936 tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl( 1950 tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
@@ -1953,16 +1967,16 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1953 return 0; 1967 return 0;
1954 1968
1955out: 1969out:
1956 tcm_vhost_free_cmd_map_res(tv_nexus, se_sess); 1970 vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
1957 transport_free_session(se_sess); 1971 transport_free_session(se_sess);
1958 kfree(tv_nexus); 1972 kfree(tv_nexus);
1959 return -ENOMEM; 1973 return -ENOMEM;
1960} 1974}
1961 1975
1962static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg) 1976static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
1963{ 1977{
1964 struct se_session *se_sess; 1978 struct se_session *se_sess;
1965 struct tcm_vhost_nexus *tv_nexus; 1979 struct vhost_scsi_nexus *tv_nexus;
1966 1980
1967 mutex_lock(&tpg->tv_tpg_mutex); 1981 mutex_lock(&tpg->tv_tpg_mutex);
1968 tv_nexus = tpg->tpg_nexus; 1982 tv_nexus = tpg->tpg_nexus;
@@ -1994,10 +2008,10 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
1994 } 2008 }
1995 2009
1996 pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated" 2010 pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1997 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport), 2011 " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
1998 tv_nexus->tvn_se_sess->se_node_acl->initiatorname); 2012 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1999 2013
2000 tcm_vhost_free_cmd_map_res(tv_nexus, se_sess); 2014 vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
2001 /* 2015 /*
2002 * Release the SCSI I_T Nexus to the emulated vhost Target Port 2016 * Release the SCSI I_T Nexus to the emulated vhost Target Port
2003 */ 2017 */
@@ -2009,12 +2023,12 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
2009 return 0; 2023 return 0;
2010} 2024}
2011 2025
2012static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg, 2026static ssize_t vhost_scsi_tpg_show_nexus(struct se_portal_group *se_tpg,
2013 char *page) 2027 char *page)
2014{ 2028{
2015 struct tcm_vhost_tpg *tpg = container_of(se_tpg, 2029 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2016 struct tcm_vhost_tpg, se_tpg); 2030 struct vhost_scsi_tpg, se_tpg);
2017 struct tcm_vhost_nexus *tv_nexus; 2031 struct vhost_scsi_nexus *tv_nexus;
2018 ssize_t ret; 2032 ssize_t ret;
2019 2033
2020 mutex_lock(&tpg->tv_tpg_mutex); 2034 mutex_lock(&tpg->tv_tpg_mutex);
@@ -2030,40 +2044,40 @@ static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
2030 return ret; 2044 return ret;
2031} 2045}
2032 2046
2033static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg, 2047static ssize_t vhost_scsi_tpg_store_nexus(struct se_portal_group *se_tpg,
2034 const char *page, 2048 const char *page,
2035 size_t count) 2049 size_t count)
2036{ 2050{
2037 struct tcm_vhost_tpg *tpg = container_of(se_tpg, 2051 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2038 struct tcm_vhost_tpg, se_tpg); 2052 struct vhost_scsi_tpg, se_tpg);
2039 struct tcm_vhost_tport *tport_wwn = tpg->tport; 2053 struct vhost_scsi_tport *tport_wwn = tpg->tport;
2040 unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr; 2054 unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
2041 int ret; 2055 int ret;
2042 /* 2056 /*
2043 * Shutdown the active I_T nexus if 'NULL' is passed.. 2057 * Shutdown the active I_T nexus if 'NULL' is passed..
2044 */ 2058 */
2045 if (!strncmp(page, "NULL", 4)) { 2059 if (!strncmp(page, "NULL", 4)) {
2046 ret = tcm_vhost_drop_nexus(tpg); 2060 ret = vhost_scsi_drop_nexus(tpg);
2047 return (!ret) ? count : ret; 2061 return (!ret) ? count : ret;
2048 } 2062 }
2049 /* 2063 /*
2050 * Otherwise make sure the passed virtual Initiator port WWN matches 2064 * Otherwise make sure the passed virtual Initiator port WWN matches
2051 * the fabric protocol_id set in tcm_vhost_make_tport(), and call 2065 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
2052 * tcm_vhost_make_nexus(). 2066 * vhost_scsi_make_nexus().
2053 */ 2067 */
2054 if (strlen(page) >= TCM_VHOST_NAMELEN) { 2068 if (strlen(page) >= VHOST_SCSI_NAMELEN) {
2055 pr_err("Emulated NAA Sas Address: %s, exceeds" 2069 pr_err("Emulated NAA Sas Address: %s, exceeds"
2056 " max: %d\n", page, TCM_VHOST_NAMELEN); 2070 " max: %d\n", page, VHOST_SCSI_NAMELEN);
2057 return -EINVAL; 2071 return -EINVAL;
2058 } 2072 }
2059 snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page); 2073 snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
2060 2074
2061 ptr = strstr(i_port, "naa."); 2075 ptr = strstr(i_port, "naa.");
2062 if (ptr) { 2076 if (ptr) {
2063 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) { 2077 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2064 pr_err("Passed SAS Initiator Port %s does not" 2078 pr_err("Passed SAS Initiator Port %s does not"
2065 " match target port protoid: %s\n", i_port, 2079 " match target port protoid: %s\n", i_port,
2066 tcm_vhost_dump_proto_id(tport_wwn)); 2080 vhost_scsi_dump_proto_id(tport_wwn));
2067 return -EINVAL; 2081 return -EINVAL;
2068 } 2082 }
2069 port_ptr = &i_port[0]; 2083 port_ptr = &i_port[0];
@@ -2074,7 +2088,7 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
2074 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) { 2088 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2075 pr_err("Passed FCP Initiator Port %s does not" 2089 pr_err("Passed FCP Initiator Port %s does not"
2076 " match target port protoid: %s\n", i_port, 2090 " match target port protoid: %s\n", i_port,
2077 tcm_vhost_dump_proto_id(tport_wwn)); 2091 vhost_scsi_dump_proto_id(tport_wwn));
2078 return -EINVAL; 2092 return -EINVAL;
2079 } 2093 }
2080 port_ptr = &i_port[3]; /* Skip over "fc." */ 2094 port_ptr = &i_port[3]; /* Skip over "fc." */
@@ -2085,7 +2099,7 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
2085 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) { 2099 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2086 pr_err("Passed iSCSI Initiator Port %s does not" 2100 pr_err("Passed iSCSI Initiator Port %s does not"
2087 " match target port protoid: %s\n", i_port, 2101 " match target port protoid: %s\n", i_port,
2088 tcm_vhost_dump_proto_id(tport_wwn)); 2102 vhost_scsi_dump_proto_id(tport_wwn));
2089 return -EINVAL; 2103 return -EINVAL;
2090 } 2104 }
2091 port_ptr = &i_port[0]; 2105 port_ptr = &i_port[0];
@@ -2101,40 +2115,40 @@ check_newline:
2101 if (i_port[strlen(i_port)-1] == '\n') 2115 if (i_port[strlen(i_port)-1] == '\n')
2102 i_port[strlen(i_port)-1] = '\0'; 2116 i_port[strlen(i_port)-1] = '\0';
2103 2117
2104 ret = tcm_vhost_make_nexus(tpg, port_ptr); 2118 ret = vhost_scsi_make_nexus(tpg, port_ptr);
2105 if (ret < 0) 2119 if (ret < 0)
2106 return ret; 2120 return ret;
2107 2121
2108 return count; 2122 return count;
2109} 2123}
2110 2124
2111TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR); 2125TF_TPG_BASE_ATTR(vhost_scsi, nexus, S_IRUGO | S_IWUSR);
2112 2126
2113static struct configfs_attribute *tcm_vhost_tpg_attrs[] = { 2127static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
2114 &tcm_vhost_tpg_nexus.attr, 2128 &vhost_scsi_tpg_nexus.attr,
2115 NULL, 2129 NULL,
2116}; 2130};
2117 2131
2118static struct se_portal_group * 2132static struct se_portal_group *
2119tcm_vhost_make_tpg(struct se_wwn *wwn, 2133vhost_scsi_make_tpg(struct se_wwn *wwn,
2120 struct config_group *group, 2134 struct config_group *group,
2121 const char *name) 2135 const char *name)
2122{ 2136{
2123 struct tcm_vhost_tport *tport = container_of(wwn, 2137 struct vhost_scsi_tport *tport = container_of(wwn,
2124 struct tcm_vhost_tport, tport_wwn); 2138 struct vhost_scsi_tport, tport_wwn);
2125 2139
2126 struct tcm_vhost_tpg *tpg; 2140 struct vhost_scsi_tpg *tpg;
2127 unsigned long tpgt; 2141 u16 tpgt;
2128 int ret; 2142 int ret;
2129 2143
2130 if (strstr(name, "tpgt_") != name) 2144 if (strstr(name, "tpgt_") != name)
2131 return ERR_PTR(-EINVAL); 2145 return ERR_PTR(-EINVAL);
2132 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX) 2146 if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2133 return ERR_PTR(-EINVAL); 2147 return ERR_PTR(-EINVAL);
2134 2148
2135 tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL); 2149 tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
2136 if (!tpg) { 2150 if (!tpg) {
2137 pr_err("Unable to allocate struct tcm_vhost_tpg"); 2151 pr_err("Unable to allocate struct vhost_scsi_tpg");
2138 return ERR_PTR(-ENOMEM); 2152 return ERR_PTR(-ENOMEM);
2139 } 2153 }
2140 mutex_init(&tpg->tv_tpg_mutex); 2154 mutex_init(&tpg->tv_tpg_mutex);
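
[note on the hunk above] The parsing change here is more than a rename: the old kstrtoul() path accepted any value up to UINT_MAX, far beyond what the driver's u8 target index (and the single target byte in the event LUN) can express, whereas kstrtou16() plus the VHOST_SCSI_MAX_TARGET bound rejects such names at configfs creation time. That is also what lets the earlier hunk drop the '& 0xFF' when filling evt->event.lun[1]. For reference, kstrtou16() semantics:

    u16 v;

    kstrtou16("5", 10, &v);      /* returns 0, v == 5 */
    kstrtou16("70000", 10, &v);  /* returns -ERANGE: > U16_MAX */
    kstrtou16("5x", 10, &v);     /* returns -EINVAL: trailing junk */
    /* A single trailing '\n' is tolerated, which suits configfs input. */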
@@ -2142,31 +2156,31 @@ tcm_vhost_make_tpg(struct se_wwn *wwn,
2142 tpg->tport = tport; 2156 tpg->tport = tport;
2143 tpg->tport_tpgt = tpgt; 2157 tpg->tport_tpgt = tpgt;
2144 2158
2145 ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn, 2159 ret = core_tpg_register(&vhost_scsi_fabric_configfs->tf_ops, wwn,
2146 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); 2160 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
2147 if (ret < 0) { 2161 if (ret < 0) {
2148 kfree(tpg); 2162 kfree(tpg);
2149 return NULL; 2163 return NULL;
2150 } 2164 }
2151 mutex_lock(&tcm_vhost_mutex); 2165 mutex_lock(&vhost_scsi_mutex);
2152 list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list); 2166 list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2153 mutex_unlock(&tcm_vhost_mutex); 2167 mutex_unlock(&vhost_scsi_mutex);
2154 2168
2155 return &tpg->se_tpg; 2169 return &tpg->se_tpg;
2156} 2170}
2157 2171
2158static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg) 2172static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
2159{ 2173{
2160 struct tcm_vhost_tpg *tpg = container_of(se_tpg, 2174 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2161 struct tcm_vhost_tpg, se_tpg); 2175 struct vhost_scsi_tpg, se_tpg);
2162 2176
2163 mutex_lock(&tcm_vhost_mutex); 2177 mutex_lock(&vhost_scsi_mutex);
2164 list_del(&tpg->tv_tpg_list); 2178 list_del(&tpg->tv_tpg_list);
2165 mutex_unlock(&tcm_vhost_mutex); 2179 mutex_unlock(&vhost_scsi_mutex);
2166 /* 2180 /*
2167 * Release the virtual I_T Nexus for this vhost TPG 2181 * Release the virtual I_T Nexus for this vhost TPG
2168 */ 2182 */
2169 tcm_vhost_drop_nexus(tpg); 2183 vhost_scsi_drop_nexus(tpg);
2170 /* 2184 /*
2171 * Deregister the se_tpg from TCM.. 2185 * Deregister the se_tpg from TCM..
2172 */ 2186 */
@@ -2175,21 +2189,21 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
2175} 2189}
2176 2190
2177static struct se_wwn * 2191static struct se_wwn *
2178tcm_vhost_make_tport(struct target_fabric_configfs *tf, 2192vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2179 struct config_group *group, 2193 struct config_group *group,
2180 const char *name) 2194 const char *name)
2181{ 2195{
2182 struct tcm_vhost_tport *tport; 2196 struct vhost_scsi_tport *tport;
2183 char *ptr; 2197 char *ptr;
2184 u64 wwpn = 0; 2198 u64 wwpn = 0;
2185 int off = 0; 2199 int off = 0;
2186 2200
2187 /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0) 2201 /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2188 return ERR_PTR(-EINVAL); */ 2202 return ERR_PTR(-EINVAL); */
2189 2203
2190 tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL); 2204 tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL);
2191 if (!tport) { 2205 if (!tport) {
2192 pr_err("Unable to allocate struct tcm_vhost_tport"); 2206 pr_err("Unable to allocate struct vhost_scsi_tport");
2193 return ERR_PTR(-ENOMEM); 2207 return ERR_PTR(-ENOMEM);
2194 } 2208 }
2195 tport->tport_wwpn = wwpn; 2209 tport->tport_wwpn = wwpn;
@@ -2220,102 +2234,102 @@ tcm_vhost_make_tport(struct target_fabric_configfs *tf,
2220 return ERR_PTR(-EINVAL); 2234 return ERR_PTR(-EINVAL);
2221 2235
2222check_len: 2236check_len:
2223 if (strlen(name) >= TCM_VHOST_NAMELEN) { 2237 if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2224 pr_err("Emulated %s Address: %s, exceeds" 2238 pr_err("Emulated %s Address: %s, exceeds"
2225 " max: %d\n", name, tcm_vhost_dump_proto_id(tport), 2239 " max: %d\n", name, vhost_scsi_dump_proto_id(tport),
2226 TCM_VHOST_NAMELEN); 2240 VHOST_SCSI_NAMELEN);
2227 kfree(tport); 2241 kfree(tport);
2228 return ERR_PTR(-EINVAL); 2242 return ERR_PTR(-EINVAL);
2229 } 2243 }
2230 snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]); 2244 snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2231 2245
2232 pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target" 2246 pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2233 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name); 2247 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2234 2248
2235 return &tport->tport_wwn; 2249 return &tport->tport_wwn;
2236} 2250}
2237 2251
2238static void tcm_vhost_drop_tport(struct se_wwn *wwn) 2252static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2239{ 2253{
2240 struct tcm_vhost_tport *tport = container_of(wwn, 2254 struct vhost_scsi_tport *tport = container_of(wwn,
2241 struct tcm_vhost_tport, tport_wwn); 2255 struct vhost_scsi_tport, tport_wwn);
2242 2256
2243 pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target" 2257 pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2244 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), 2258 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2245 tport->tport_name); 2259 tport->tport_name);
2246 2260
2247 kfree(tport); 2261 kfree(tport);
2248} 2262}
2249 2263
2250static ssize_t 2264static ssize_t
2251tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf, 2265vhost_scsi_wwn_show_attr_version(struct target_fabric_configfs *tf,
2252 char *page) 2266 char *page)
2253{ 2267{
2254 return sprintf(page, "TCM_VHOST fabric module %s on %s/%s" 2268 return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2255 "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname, 2269 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2256 utsname()->machine); 2270 utsname()->machine);
2257} 2271}
2258 2272
2259TF_WWN_ATTR_RO(tcm_vhost, version); 2273TF_WWN_ATTR_RO(vhost_scsi, version);
2260 2274
2261static struct configfs_attribute *tcm_vhost_wwn_attrs[] = { 2275static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2262 &tcm_vhost_wwn_version.attr, 2276 &vhost_scsi_wwn_version.attr,
2263 NULL, 2277 NULL,
2264}; 2278};
2265 2279
2266static struct target_core_fabric_ops tcm_vhost_ops = { 2280static struct target_core_fabric_ops vhost_scsi_ops = {
2267 .get_fabric_name = tcm_vhost_get_fabric_name, 2281 .get_fabric_name = vhost_scsi_get_fabric_name,
2268 .get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident, 2282 .get_fabric_proto_ident = vhost_scsi_get_fabric_proto_ident,
2269 .tpg_get_wwn = tcm_vhost_get_fabric_wwn, 2283 .tpg_get_wwn = vhost_scsi_get_fabric_wwn,
2270 .tpg_get_tag = tcm_vhost_get_tag, 2284 .tpg_get_tag = vhost_scsi_get_tpgt,
2271 .tpg_get_default_depth = tcm_vhost_get_default_depth, 2285 .tpg_get_default_depth = vhost_scsi_get_default_depth,
2272 .tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id, 2286 .tpg_get_pr_transport_id = vhost_scsi_get_pr_transport_id,
2273 .tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len, 2287 .tpg_get_pr_transport_id_len = vhost_scsi_get_pr_transport_id_len,
2274 .tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id, 2288 .tpg_parse_pr_out_transport_id = vhost_scsi_parse_pr_out_transport_id,
2275 .tpg_check_demo_mode = tcm_vhost_check_true, 2289 .tpg_check_demo_mode = vhost_scsi_check_true,
2276 .tpg_check_demo_mode_cache = tcm_vhost_check_true, 2290 .tpg_check_demo_mode_cache = vhost_scsi_check_true,
2277 .tpg_check_demo_mode_write_protect = tcm_vhost_check_false, 2291 .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
2278 .tpg_check_prod_mode_write_protect = tcm_vhost_check_false, 2292 .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2279 .tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl, 2293 .tpg_alloc_fabric_acl = vhost_scsi_alloc_fabric_acl,
2280 .tpg_release_fabric_acl = tcm_vhost_release_fabric_acl, 2294 .tpg_release_fabric_acl = vhost_scsi_release_fabric_acl,
2281 .tpg_get_inst_index = tcm_vhost_tpg_get_inst_index, 2295 .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index,
2282 .release_cmd = tcm_vhost_release_cmd, 2296 .release_cmd = vhost_scsi_release_cmd,
2283 .check_stop_free = vhost_scsi_check_stop_free, 2297 .check_stop_free = vhost_scsi_check_stop_free,
2284 .shutdown_session = tcm_vhost_shutdown_session, 2298 .shutdown_session = vhost_scsi_shutdown_session,
2285 .close_session = tcm_vhost_close_session, 2299 .close_session = vhost_scsi_close_session,
2286 .sess_get_index = tcm_vhost_sess_get_index, 2300 .sess_get_index = vhost_scsi_sess_get_index,
2287 .sess_get_initiator_sid = NULL, 2301 .sess_get_initiator_sid = NULL,
2288 .write_pending = tcm_vhost_write_pending, 2302 .write_pending = vhost_scsi_write_pending,
2289 .write_pending_status = tcm_vhost_write_pending_status, 2303 .write_pending_status = vhost_scsi_write_pending_status,
2290 .set_default_node_attributes = tcm_vhost_set_default_node_attrs, 2304 .set_default_node_attributes = vhost_scsi_set_default_node_attrs,
2291 .get_task_tag = tcm_vhost_get_task_tag, 2305 .get_task_tag = vhost_scsi_get_task_tag,
2292 .get_cmd_state = tcm_vhost_get_cmd_state, 2306 .get_cmd_state = vhost_scsi_get_cmd_state,
2293 .queue_data_in = tcm_vhost_queue_data_in, 2307 .queue_data_in = vhost_scsi_queue_data_in,
2294 .queue_status = tcm_vhost_queue_status, 2308 .queue_status = vhost_scsi_queue_status,
2295 .queue_tm_rsp = tcm_vhost_queue_tm_rsp, 2309 .queue_tm_rsp = vhost_scsi_queue_tm_rsp,
2296 .aborted_task = tcm_vhost_aborted_task, 2310 .aborted_task = vhost_scsi_aborted_task,
2297 /* 2311 /*
2298 * Setup callers for generic logic in target_core_fabric_configfs.c 2312 * Setup callers for generic logic in target_core_fabric_configfs.c
2299 */ 2313 */
2300 .fabric_make_wwn = tcm_vhost_make_tport, 2314 .fabric_make_wwn = vhost_scsi_make_tport,
2301 .fabric_drop_wwn = tcm_vhost_drop_tport, 2315 .fabric_drop_wwn = vhost_scsi_drop_tport,
2302 .fabric_make_tpg = tcm_vhost_make_tpg, 2316 .fabric_make_tpg = vhost_scsi_make_tpg,
2303 .fabric_drop_tpg = tcm_vhost_drop_tpg, 2317 .fabric_drop_tpg = vhost_scsi_drop_tpg,
2304 .fabric_post_link = tcm_vhost_port_link, 2318 .fabric_post_link = vhost_scsi_port_link,
2305 .fabric_pre_unlink = tcm_vhost_port_unlink, 2319 .fabric_pre_unlink = vhost_scsi_port_unlink,
2306 .fabric_make_np = NULL, 2320 .fabric_make_np = NULL,
2307 .fabric_drop_np = NULL, 2321 .fabric_drop_np = NULL,
2308 .fabric_make_nodeacl = tcm_vhost_make_nodeacl, 2322 .fabric_make_nodeacl = vhost_scsi_make_nodeacl,
2309 .fabric_drop_nodeacl = tcm_vhost_drop_nodeacl, 2323 .fabric_drop_nodeacl = vhost_scsi_drop_nodeacl,
2310}; 2324};
2311 2325
2312static int tcm_vhost_register_configfs(void) 2326static int vhost_scsi_register_configfs(void)
2313{ 2327{
2314 struct target_fabric_configfs *fabric; 2328 struct target_fabric_configfs *fabric;
2315 int ret; 2329 int ret;
2316 2330
2317 pr_debug("TCM_VHOST fabric module %s on %s/%s" 2331 pr_debug("vhost-scsi fabric module %s on %s/%s"
2318 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname, 2332 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2319 utsname()->machine); 2333 utsname()->machine);
2320 /* 2334 /*
2321 * Register the top level struct config_item_type with TCM core 2335 * Register the top level struct config_item_type with TCM core
@@ -2326,14 +2340,14 @@ static int tcm_vhost_register_configfs(void)
2326 return PTR_ERR(fabric); 2340 return PTR_ERR(fabric);
2327 } 2341 }
2328 /* 2342 /*
2329 * Setup fabric->tf_ops from our local tcm_vhost_ops 2343 * Setup fabric->tf_ops from our local vhost_scsi_ops
2330 */ 2344 */
2331 fabric->tf_ops = tcm_vhost_ops; 2345 fabric->tf_ops = vhost_scsi_ops;
2332 /* 2346 /*
2333 * Setup default attribute lists for various fabric->tf_cit_tmpl 2347 * Setup default attribute lists for various fabric->tf_cit_tmpl
2334 */ 2348 */
2335 fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs; 2349 fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = vhost_scsi_wwn_attrs;
2336 fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs; 2350 fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = vhost_scsi_tpg_attrs;
2337 fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; 2351 fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
2338 fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; 2352 fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
2339 fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; 2353 fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
@@ -2353,37 +2367,37 @@ static int tcm_vhost_register_configfs(void)
2353 /* 2367 /*
2354 * Setup our local pointer to *fabric 2368 * Setup our local pointer to *fabric
2355 */ 2369 */
2356 tcm_vhost_fabric_configfs = fabric; 2370 vhost_scsi_fabric_configfs = fabric;
2357 pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n"); 2371 pr_debug("TCM_VHOST[0] - Set fabric -> vhost_scsi_fabric_configfs\n");
2358 return 0; 2372 return 0;
2359}; 2373};
2360 2374
2361static void tcm_vhost_deregister_configfs(void) 2375static void vhost_scsi_deregister_configfs(void)
2362{ 2376{
2363 if (!tcm_vhost_fabric_configfs) 2377 if (!vhost_scsi_fabric_configfs)
2364 return; 2378 return;
2365 2379
2366 target_fabric_configfs_deregister(tcm_vhost_fabric_configfs); 2380 target_fabric_configfs_deregister(vhost_scsi_fabric_configfs);
2367 tcm_vhost_fabric_configfs = NULL; 2381 vhost_scsi_fabric_configfs = NULL;
2368 pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n"); 2382 pr_debug("TCM_VHOST[0] - Cleared vhost_scsi_fabric_configfs\n");
2369}; 2383};
2370 2384
2371static int __init tcm_vhost_init(void) 2385static int __init vhost_scsi_init(void)
2372{ 2386{
2373 int ret = -ENOMEM; 2387 int ret = -ENOMEM;
2374 /* 2388 /*
2375 * Use our own dedicated workqueue for submitting I/O into 2389 * Use our own dedicated workqueue for submitting I/O into
2376 * target core to avoid contention within system_wq. 2390 * target core to avoid contention within system_wq.
2377 */ 2391 */
2378 tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0); 2392 vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
2379 if (!tcm_vhost_workqueue) 2393 if (!vhost_scsi_workqueue)
2380 goto out; 2394 goto out;
2381 2395
2382 ret = vhost_scsi_register(); 2396 ret = vhost_scsi_register();
2383 if (ret < 0) 2397 if (ret < 0)
2384 goto out_destroy_workqueue; 2398 goto out_destroy_workqueue;
2385 2399
2386 ret = tcm_vhost_register_configfs(); 2400 ret = vhost_scsi_register_configfs();
2387 if (ret < 0) 2401 if (ret < 0)
2388 goto out_vhost_scsi_deregister; 2402 goto out_vhost_scsi_deregister;
2389 2403
@@ -2392,20 +2406,20 @@ static int __init tcm_vhost_init(void)
2392out_vhost_scsi_deregister: 2406out_vhost_scsi_deregister:
2393 vhost_scsi_deregister(); 2407 vhost_scsi_deregister();
2394out_destroy_workqueue: 2408out_destroy_workqueue:
2395 destroy_workqueue(tcm_vhost_workqueue); 2409 destroy_workqueue(vhost_scsi_workqueue);
2396out: 2410out:
2397 return ret; 2411 return ret;
2398}; 2412};
2399 2413
2400static void tcm_vhost_exit(void) 2414static void vhost_scsi_exit(void)
2401{ 2415{
2402 tcm_vhost_deregister_configfs(); 2416 vhost_scsi_deregister_configfs();
2403 vhost_scsi_deregister(); 2417 vhost_scsi_deregister();
2404 destroy_workqueue(tcm_vhost_workqueue); 2418 destroy_workqueue(vhost_scsi_workqueue);
2405}; 2419};
2406 2420
2407MODULE_DESCRIPTION("VHOST_SCSI series fabric driver"); 2421MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2408MODULE_ALIAS("tcm_vhost"); 2422MODULE_ALIAS("tcm_vhost");
2409MODULE_LICENSE("GPL"); 2423MODULE_LICENSE("GPL");
2410module_init(tcm_vhost_init); 2424module_init(vhost_scsi_init);
2411module_exit(tcm_vhost_exit); 2425module_exit(vhost_scsi_exit);
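
Editor's note: the rename above is purely mechanical -- every tcm_vhost_* symbol becomes vhost_scsi_* -- but MODULE_ALIAS("tcm_vhost") is kept, so module autoloading and existing scripts that modprobe the old name keep working. A minimal sketch of that rename-with-alias pattern (hypothetical driver body, not the code above):

    #include <linux/module.h>

    static int __init vhost_scsi_init(void)
    {
            /* register workqueue, vhost misc device, configfs fabric ... */
            return 0;
    }

    static void __exit vhost_scsi_exit(void)
    {
            /* teardown in reverse order */
    }

    module_init(vhost_scsi_init);
    module_exit(vhost_scsi_exit);
    MODULE_LICENSE("GPL");
    MODULE_ALIAS("tcm_vhost");      /* "modprobe tcm_vhost" still resolves */
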
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 00b228638274..b546da5d8ea3 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -12,16 +12,32 @@ config VIRTIO_PCI
12 depends on PCI 12 depends on PCI
13 select VIRTIO 13 select VIRTIO
14 ---help--- 14 ---help---
15 This drivers provides support for virtio based paravirtual device 15 This driver provides support for virtio based paravirtual device
16 drivers over PCI. This requires that your VMM has appropriate PCI 16 drivers over PCI. This requires that your VMM has appropriate PCI
17 virtio backends. Most QEMU based VMMs should support these devices 17 virtio backends. Most QEMU based VMMs should support these devices
18 (like KVM or Xen). 18 (like KVM or Xen).
19 19
20 Currently, the ABI is not considered stable so there is no guarantee
21 that this version of the driver will work with your VMM.
22
23 If unsure, say M. 20 If unsure, say M.
24 21
22config VIRTIO_PCI_LEGACY
23 bool "Support for legacy virtio draft 0.9.X and older devices"
24 default y
25 depends on VIRTIO_PCI
26 ---help---
27 Virtio PCI Card 0.9.X Draft (circa 2014) and older device support.
28
29 This option enables building a transitional driver, supporting
30 both devices conforming to Virtio 1 specification, and legacy devices.
31 If disabled, you get a slightly smaller, non-transitional driver,
32 with no legacy compatibility.
33
34 So look out into your driveway. Do you have a flying car? If
35 so, you can happily disable this option and virtio will not
36 break. Otherwise, leave it set. Unless you're testing what
37 life will be like in The Future.
38
39 If unsure, say Y.
40
25config VIRTIO_BALLOON 41config VIRTIO_BALLOON
26 tristate "Virtio balloon driver" 42 tristate "Virtio balloon driver"
27 depends on VIRTIO 43 depends on VIRTIO
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index bf5104b56894..d85565b8ea46 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -1,5 +1,6 @@
1obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o 1obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o
2obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o 2obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
3obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o 3obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
4virtio_pci-y := virtio_pci_legacy.o virtio_pci_common.o 4virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
5virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
5obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o 6obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index b9f70dfc4751..5ce2aa48fc6e 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -236,7 +236,10 @@ static int virtio_dev_probe(struct device *_d)
236 if (err) 236 if (err)
237 goto err; 237 goto err;
238 238
239 add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); 239 /* If probe didn't do it, mark device DRIVER_OK ourselves. */
240 if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
241 virtio_device_ready(dev);
242
240 if (drv->scan) 243 if (drv->scan)
241 drv->scan(dev); 244 drv->scan(dev);
242 245
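
Editor's note: the hunk above makes the virtio core set DRIVER_OK only when the driver's probe has not already done so, which lets a probe routine use its virtqueues before returning. A hedged sketch, assuming hypothetical helpers my_init_vqs() and my_fill_rx():

    static int my_probe(struct virtio_device *vdev)
    {
            int err = my_init_vqs(vdev);    /* hypothetical helper */
            if (err)
                    return err;

            /* Flip DRIVER_OK ourselves; the core will notice and skip it. */
            virtio_device_ready(vdev);

            my_fill_rx(vdev);       /* safe: device may now use the rings */
            return 0;
    }
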
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 50c5f42d7a9f..0413157f3b49 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -44,8 +44,7 @@ static int oom_pages = OOM_VBALLOON_DEFAULT_PAGES;
44module_param(oom_pages, int, S_IRUSR | S_IWUSR); 44module_param(oom_pages, int, S_IRUSR | S_IWUSR);
45MODULE_PARM_DESC(oom_pages, "pages to free on OOM"); 45MODULE_PARM_DESC(oom_pages, "pages to free on OOM");
46 46
47struct virtio_balloon 47struct virtio_balloon {
48{
49 struct virtio_device *vdev; 48 struct virtio_device *vdev;
50 struct virtqueue *inflate_vq, *deflate_vq, *stats_vq; 49 struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
51 50
@@ -466,6 +465,12 @@ static int virtballoon_probe(struct virtio_device *vdev)
466 struct virtio_balloon *vb; 465 struct virtio_balloon *vb;
467 int err; 466 int err;
468 467
468 if (!vdev->config->get) {
469 dev_err(&vdev->dev, "%s failure: config access disabled\n",
470 __func__);
471 return -EINVAL;
472 }
473
469 vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); 474 vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
470 if (!vb) { 475 if (!vb) {
471 err = -ENOMEM; 476 err = -ENOMEM;
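
Editor's note: in virtio 1.0 the device-specific config window is optional (see virtio_pci_config_nodev_ops later in this merge), so the transport's ->get hook can legitimately be NULL. Any driver that reads config space should guard the way the balloon now does; a sketch of the check:

    if (!vdev->config->get) {
            dev_err(&vdev->dev, "%s: config access disabled\n", __func__);
            return -EINVAL;         /* refuse to bind to this transport */
    }
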
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 00d115b22bd8..cad569890908 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Virtio memory mapped device driver 2 * Virtio memory mapped device driver
3 * 3 *
4 * Copyright 2011, ARM Ltd. 4 * Copyright 2011-2014, ARM Ltd.
5 * 5 *
6 * This module allows virtio devices to be used over a virtual, memory mapped 6 * This module allows virtio devices to be used over a virtual, memory mapped
7 * platform device. 7 * platform device.
@@ -50,36 +50,6 @@
50 * 50 *
51 * 51 *
52 * 52 *
53 * Registers layout (all 32-bit wide):
54 *
55 * offset d. name description
56 * ------ -- ---------------- -----------------
57 *
58 * 0x000 R MagicValue Magic value "virt"
59 * 0x004 R Version Device version (current max. 1)
60 * 0x008 R DeviceID Virtio device ID
61 * 0x00c R VendorID Virtio vendor ID
62 *
63 * 0x010 R HostFeatures Features supported by the host
64 * 0x014 W HostFeaturesSel Set of host features to access via HostFeatures
65 *
66 * 0x020 W GuestFeatures Features activated by the guest
67 * 0x024 W GuestFeaturesSel Set of activated features to set via GuestFeatures
68 * 0x028 W GuestPageSize Size of guest's memory page in bytes
69 *
70 * 0x030 W QueueSel Queue selector
71 * 0x034 R QueueNumMax Maximum size of the currently selected queue
72 * 0x038 W QueueNum Queue size for the currently selected queue
73 * 0x03c W QueueAlign Used Ring alignment for the current queue
74 * 0x040 RW QueuePFN PFN for the currently selected queue
75 *
76 * 0x050 W QueueNotify Queue notifier
77 * 0x060 R InterruptStatus Interrupt status register
78 * 0x064 W InterruptACK Interrupt acknowledge register
79 * 0x070 RW Status Device status register
80 *
81 * 0x100+ RW Device-specific configuration space
82 *
83 * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007 53 * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
84 * 54 *
85 * This work is licensed under the terms of the GNU GPL, version 2 or later. 55 * This work is licensed under the terms of the GNU GPL, version 2 or later.
@@ -145,11 +115,16 @@ struct virtio_mmio_vq_info {
145static u64 vm_get_features(struct virtio_device *vdev) 115static u64 vm_get_features(struct virtio_device *vdev)
146{ 116{
147 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 117 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
118 u64 features;
119
120 writel(1, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
121 features = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
122 features <<= 32;
148 123
149 /* TODO: Features > 32 bits */ 124 writel(0, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
150 writel(0, vm_dev->base + VIRTIO_MMIO_HOST_FEATURES_SEL); 125 features |= readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
151 126
152 return readl(vm_dev->base + VIRTIO_MMIO_HOST_FEATURES); 127 return features;
153} 128}
154 129
155static int vm_finalize_features(struct virtio_device *vdev) 130static int vm_finalize_features(struct virtio_device *vdev)
@@ -159,11 +134,20 @@ static int vm_finalize_features(struct virtio_device *vdev)
159 /* Give virtio_ring a chance to accept features. */ 134 /* Give virtio_ring a chance to accept features. */
160 vring_transport_features(vdev); 135 vring_transport_features(vdev);
161 136
162 /* Make sure we don't have any features > 32 bits! */ 137 /* Make sure there are no mixed devices */
163 BUG_ON((u32)vdev->features != vdev->features); 138 if (vm_dev->version == 2 &&
139 !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
140 dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
141 return -EINVAL;
142 }
143
144 writel(1, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
145 writel((u32)(vdev->features >> 32),
146 vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);
164 147
165 writel(0, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL); 148 writel(0, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
166 writel(vdev->features, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES); 149 writel((u32)vdev->features,
150 vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);
167 151
168 return 0; 152 return 0;
169} 153}
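
Editor's note: version-2 virtio-mmio widens feature negotiation to 64 bits while keeping 32-bit registers -- a *_SEL write selects which half the *_FEATURES register exposes. Condensed from the two hunks above (base stands for vm_dev->base):

    u64 features;

    writel(1, base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);      /* bits 63:32 */
    features = (u64)readl(base + VIRTIO_MMIO_DEVICE_FEATURES) << 32;
    writel(0, base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);      /* bits 31:0  */
    features |= readl(base + VIRTIO_MMIO_DEVICE_FEATURES);

    /* Writing the driver's half works the same way, via
     * VIRTIO_MMIO_DRIVER_FEATURES{,_SEL}. */
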
@@ -275,7 +259,12 @@ static void vm_del_vq(struct virtqueue *vq)
275 259
276 /* Select and deactivate the queue */ 260 /* Select and deactivate the queue */
277 writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL); 261 writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
278 writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); 262 if (vm_dev->version == 1) {
263 writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
264 } else {
265 writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
266 WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
267 }
279 268
280 size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN)); 269 size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN));
281 free_pages_exact(info->queue, size); 270 free_pages_exact(info->queue, size);
@@ -312,7 +301,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
312 writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL); 301 writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
313 302
314 /* Queue shouldn't already be set up. */ 303 /* Queue shouldn't already be set up. */
315 if (readl(vm_dev->base + VIRTIO_MMIO_QUEUE_PFN)) { 304 if (readl(vm_dev->base + (vm_dev->version == 1 ?
305 VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
316 err = -ENOENT; 306 err = -ENOENT;
317 goto error_available; 307 goto error_available;
318 } 308 }
@@ -356,13 +346,6 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
356 info->num /= 2; 346 info->num /= 2;
357 } 347 }
358 348
359 /* Activate the queue */
360 writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
361 writel(VIRTIO_MMIO_VRING_ALIGN,
362 vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
363 writel(virt_to_phys(info->queue) >> PAGE_SHIFT,
364 vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
365
366 /* Create the vring */ 349 /* Create the vring */
367 vq = vring_new_virtqueue(index, info->num, VIRTIO_MMIO_VRING_ALIGN, vdev, 350 vq = vring_new_virtqueue(index, info->num, VIRTIO_MMIO_VRING_ALIGN, vdev,
368 true, info->queue, vm_notify, callback, name); 351 true, info->queue, vm_notify, callback, name);
@@ -371,6 +354,33 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
371 goto error_new_virtqueue; 354 goto error_new_virtqueue;
372 } 355 }
373 356
357 /* Activate the queue */
358 writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
359 if (vm_dev->version == 1) {
360 writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
361 writel(virt_to_phys(info->queue) >> PAGE_SHIFT,
362 vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
363 } else {
364 u64 addr;
365
366 addr = virt_to_phys(info->queue);
367 writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
368 writel((u32)(addr >> 32),
369 vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);
370
371 addr = virt_to_phys(virtqueue_get_avail(vq));
372 writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
373 writel((u32)(addr >> 32),
374 vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);
375
376 addr = virt_to_phys(virtqueue_get_used(vq));
377 writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
378 writel((u32)(addr >> 32),
379 vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);
380
381 writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
382 }
383
374 vq->priv = info; 384 vq->priv = info;
375 info->vq = vq; 385 info->vq = vq;
376 386
@@ -381,7 +391,12 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
381 return vq; 391 return vq;
382 392
383error_new_virtqueue: 393error_new_virtqueue:
384 writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); 394 if (vm_dev->version == 1) {
395 writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
396 } else {
397 writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
398 WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
399 }
385 free_pages_exact(info->queue, size); 400 free_pages_exact(info->queue, size);
386error_alloc_pages: 401error_alloc_pages:
387 kfree(info); 402 kfree(info);
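
Editor's note: version 1 published the whole ring as a single guest page frame number; version 2 instead programs the descriptor, avail and used rings as separate 64-bit physical addresses, each split over a LOW/HIGH register pair, then writes QUEUE_READY last. The open-coded writes above all follow one pattern (the helper name here is hypothetical):

    static void vm_write64(void __iomem *base, u64 addr,
                           unsigned long lo, unsigned long hi)
    {
            writel((u32)addr, base + lo);            /* low 32 bits  */
            writel((u32)(addr >> 32), base + hi);    /* high 32 bits */
    }

    /* e.g. vm_write64(vm_dev->base, virt_to_phys(info->queue),
     *                 VIRTIO_MMIO_QUEUE_DESC_LOW,
     *                 VIRTIO_MMIO_QUEUE_DESC_HIGH);
     * followed by writel(1, base + VIRTIO_MMIO_QUEUE_READY) once all
     * three addresses are in place. */
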
@@ -476,16 +491,32 @@ static int virtio_mmio_probe(struct platform_device *pdev)
476 491
477 /* Check device version */ 492 /* Check device version */
478 vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION); 493 vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
479 if (vm_dev->version != 1) { 494 if (vm_dev->version < 1 || vm_dev->version > 2) {
480 dev_err(&pdev->dev, "Version %ld not supported!\n", 495 dev_err(&pdev->dev, "Version %ld not supported!\n",
481 vm_dev->version); 496 vm_dev->version);
482 return -ENXIO; 497 return -ENXIO;
483 } 498 }
484 499
485 vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID); 500 vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
501 if (vm_dev->vdev.id.device == 0) {
502 /*
503 * virtio-mmio device with an ID 0 is a (dummy) placeholder
504 * with no function. End probing now with no error reported.
505 */
506 return -ENODEV;
507 }
486 vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID); 508 vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
487 509
488 writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE); 510 /* Reject legacy-only IDs for version 2 devices */
511 if (vm_dev->version == 2 &&
512 virtio_device_is_legacy_only(vm_dev->vdev.id)) {
513 dev_err(&pdev->dev, "Version 2 not supported for devices %u!\n",
514 vm_dev->vdev.id.device);
515 return -ENODEV;
516 }
517
518 if (vm_dev->version == 1)
519 writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
489 520
490 platform_set_drvdata(pdev, vm_dev); 521 platform_set_drvdata(pdev, vm_dev);
491 522
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 9756f21b809e..e894eb278d83 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -19,6 +19,14 @@
19 19
20#include "virtio_pci_common.h" 20#include "virtio_pci_common.h"
21 21
22static bool force_legacy = false;
23
24#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
25module_param(force_legacy, bool, 0444);
26MODULE_PARM_DESC(force_legacy,
27 "Force legacy mode for transitional virtio 1 devices");
28#endif
29
22/* wait for pending irq handlers */ 30/* wait for pending irq handlers */
23void vp_synchronize_vectors(struct virtio_device *vdev) 31void vp_synchronize_vectors(struct virtio_device *vdev)
24{ 32{
@@ -464,15 +472,97 @@ static const struct pci_device_id virtio_pci_id_table[] = {
464 472
465MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); 473MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
466 474
475static void virtio_pci_release_dev(struct device *_d)
476{
477 struct virtio_device *vdev = dev_to_virtio(_d);
478 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
479
480 /* As struct device is a kobject, it's not safe to
481 * free the memory (including the reference counter itself)
482 * until its release callback runs. */
483 kfree(vp_dev);
484}
485
467static int virtio_pci_probe(struct pci_dev *pci_dev, 486static int virtio_pci_probe(struct pci_dev *pci_dev,
468 const struct pci_device_id *id) 487 const struct pci_device_id *id)
469{ 488{
470 return virtio_pci_legacy_probe(pci_dev, id); 489 struct virtio_pci_device *vp_dev;
490 int rc;
491
492 /* allocate our structure and fill it out */
493 vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
494 if (!vp_dev)
495 return -ENOMEM;
496
497 pci_set_drvdata(pci_dev, vp_dev);
498 vp_dev->vdev.dev.parent = &pci_dev->dev;
499 vp_dev->vdev.dev.release = virtio_pci_release_dev;
500 vp_dev->pci_dev = pci_dev;
501 INIT_LIST_HEAD(&vp_dev->virtqueues);
502 spin_lock_init(&vp_dev->lock);
503
504 /* Disable MSI/MSIX to bring device to a known good state. */
505 pci_msi_off(pci_dev);
506
507 /* enable the device */
508 rc = pci_enable_device(pci_dev);
509 if (rc)
510 goto err_enable_device;
511
512 rc = pci_request_regions(pci_dev, "virtio-pci");
513 if (rc)
514 goto err_request_regions;
515
516 if (force_legacy) {
517 rc = virtio_pci_legacy_probe(vp_dev);
518 /* Also try modern mode if we can't map BAR0 (no IO space). */
519 if (rc == -ENODEV || rc == -ENOMEM)
520 rc = virtio_pci_modern_probe(vp_dev);
521 if (rc)
522 goto err_probe;
523 } else {
524 rc = virtio_pci_modern_probe(vp_dev);
525 if (rc == -ENODEV)
526 rc = virtio_pci_legacy_probe(vp_dev);
527 if (rc)
528 goto err_probe;
529 }
530
531 pci_set_master(pci_dev);
532
533 rc = register_virtio_device(&vp_dev->vdev);
534 if (rc)
535 goto err_register;
536
537 return 0;
538
539err_register:
540 if (vp_dev->ioaddr)
541 virtio_pci_legacy_remove(vp_dev);
542 else
543 virtio_pci_modern_remove(vp_dev);
544err_probe:
545 pci_release_regions(pci_dev);
546err_request_regions:
547 pci_disable_device(pci_dev);
548err_enable_device:
549 kfree(vp_dev);
550 return rc;
471} 551}
472 552
473static void virtio_pci_remove(struct pci_dev *pci_dev) 553static void virtio_pci_remove(struct pci_dev *pci_dev)
474{ 554{
475 virtio_pci_legacy_remove(pci_dev); 555 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
556
557 unregister_virtio_device(&vp_dev->vdev);
558
559 if (vp_dev->ioaddr)
560 virtio_pci_legacy_remove(vp_dev);
561 else
562 virtio_pci_modern_remove(vp_dev);
563
564 pci_release_regions(pci_dev);
565 pci_disable_device(pci_dev);
476} 566}
477 567
478static struct pci_driver virtio_pci_driver = { 568static struct pci_driver virtio_pci_driver = {
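
Editor's note: the common probe now tries the modern (virtio 1.0) transport first and falls back to legacy on -ENODEV; the force_legacy parameter inverts that order for debugging (e.g. modprobe virtio_pci force_legacy=1). A condensed sketch of the decision -- the real code above additionally retries modern on -ENOMEM when legacy cannot map BAR0:

    rc = force_legacy ? virtio_pci_legacy_probe(vp_dev)
                      : virtio_pci_modern_probe(vp_dev);
    if (rc == -ENODEV)
            rc = force_legacy ? virtio_pci_modern_probe(vp_dev)
                              : virtio_pci_legacy_probe(vp_dev);
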
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 5a497289b7e9..28ee4e56badf 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -53,12 +53,32 @@ struct virtio_pci_device {
53 struct virtio_device vdev; 53 struct virtio_device vdev;
54 struct pci_dev *pci_dev; 54 struct pci_dev *pci_dev;
55 55
56 /* In legacy mode, these two point to within ->legacy. */
57 /* Where to read and clear interrupt */
58 u8 __iomem *isr;
59
60 /* Modern only fields */
61 /* The IO mapping for the PCI config space (non-legacy mode) */
62 struct virtio_pci_common_cfg __iomem *common;
63 /* Device-specific data (non-legacy mode) */
64 void __iomem *device;
65 /* Base of vq notifications (non-legacy mode). */
66 void __iomem *notify_base;
67
68 /* So we can sanity-check accesses. */
69 size_t notify_len;
70 size_t device_len;
71
72 /* Capability for when we need to map notifications per-vq. */
73 int notify_map_cap;
74
75 /* Multiply queue_notify_off by this value. (non-legacy mode). */
76 u32 notify_offset_multiplier;
77
78 /* Legacy only field */
56 /* the IO mapping for the PCI config space */ 79 /* the IO mapping for the PCI config space */
57 void __iomem *ioaddr; 80 void __iomem *ioaddr;
58 81
59 /* the IO mapping for ISR operation */
60 void __iomem *isr;
61
62 /* a list of queues so we can dispatch IRQs */ 82 /* a list of queues so we can dispatch IRQs */
63 spinlock_t lock; 83 spinlock_t lock;
64 struct list_head virtqueues; 84 struct list_head virtqueues;
@@ -127,8 +147,19 @@ const char *vp_bus_name(struct virtio_device *vdev);
127 */ 147 */
128int vp_set_vq_affinity(struct virtqueue *vq, int cpu); 148int vp_set_vq_affinity(struct virtqueue *vq, int cpu);
129 149
130int virtio_pci_legacy_probe(struct pci_dev *pci_dev, 150#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
131 const struct pci_device_id *id); 151int virtio_pci_legacy_probe(struct virtio_pci_device *);
132void virtio_pci_legacy_remove(struct pci_dev *pci_dev); 152void virtio_pci_legacy_remove(struct virtio_pci_device *);
153#else
154static inline int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
155{
156 return -ENODEV;
157}
158static inline void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
159{
160}
161#endif
162int virtio_pci_modern_probe(struct virtio_pci_device *);
163void virtio_pci_modern_remove(struct virtio_pci_device *);
133 164
134#endif 165#endif
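
Editor's note: the header uses the usual IS_ENABLED() stub idiom -- when the option is off, callers compile against inline stubs returning -ENODEV and need no #ifdef of their own, and the legacy object file is simply never built. The same shape with hypothetical names:

    struct foo_device;

    #if IS_ENABLED(CONFIG_FOO)
    int foo_probe(struct foo_device *dev);
    void foo_remove(struct foo_device *dev);
    #else
    static inline int foo_probe(struct foo_device *dev) { return -ENODEV; }
    static inline void foo_remove(struct foo_device *dev) { }
    #endif
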
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index a5486e65e04b..256a5278a515 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -211,23 +211,10 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
211 .set_vq_affinity = vp_set_vq_affinity, 211 .set_vq_affinity = vp_set_vq_affinity,
212}; 212};
213 213
214static void virtio_pci_release_dev(struct device *_d)
215{
216 struct virtio_device *vdev = dev_to_virtio(_d);
217 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
218
219 /* As struct device is a kobject, it's not safe to
220 * free the memory (including the reference counter itself)
221 * until it's release callback. */
222 kfree(vp_dev);
223}
224
225/* the PCI probing function */ 214/* the PCI probing function */
226int virtio_pci_legacy_probe(struct pci_dev *pci_dev, 215int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
227 const struct pci_device_id *id)
228{ 216{
229 struct virtio_pci_device *vp_dev; 217 struct pci_dev *pci_dev = vp_dev->pci_dev;
230 int err;
231 218
232 /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */ 219 /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
233 if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f) 220 if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
@@ -239,41 +226,12 @@ int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
239 return -ENODEV; 226 return -ENODEV;
240 } 227 }
241 228
242 /* allocate our structure and fill it out */
243 vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
244 if (vp_dev == NULL)
245 return -ENOMEM;
246
247 vp_dev->vdev.dev.parent = &pci_dev->dev;
248 vp_dev->vdev.dev.release = virtio_pci_release_dev;
249 vp_dev->vdev.config = &virtio_pci_config_ops;
250 vp_dev->pci_dev = pci_dev;
251 INIT_LIST_HEAD(&vp_dev->virtqueues);
252 spin_lock_init(&vp_dev->lock);
253
254 /* Disable MSI/MSIX to bring device to a known good state. */
255 pci_msi_off(pci_dev);
256
257 /* enable the device */
258 err = pci_enable_device(pci_dev);
259 if (err)
260 goto out;
261
262 err = pci_request_regions(pci_dev, "virtio-pci");
263 if (err)
264 goto out_enable_device;
265
266 vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0); 229 vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
267 if (vp_dev->ioaddr == NULL) { 230 if (!vp_dev->ioaddr)
268 err = -ENOMEM; 231 return -ENOMEM;
269 goto out_req_regions;
270 }
271 232
272 vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR; 233 vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;
273 234
274 pci_set_drvdata(pci_dev, vp_dev);
275 pci_set_master(pci_dev);
276
277 /* we use the subsystem vendor/device id as the virtio vendor/device 235 /* we use the subsystem vendor/device id as the virtio vendor/device
278 * id. this allows us to use the same PCI vendor/device id for all 236 * id. this allows us to use the same PCI vendor/device id for all
279 * virtio devices and to identify the particular virtio driver by 237 * virtio devices and to identify the particular virtio driver by
@@ -281,36 +239,18 @@ int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
281 vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; 239 vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
282 vp_dev->vdev.id.device = pci_dev->subsystem_device; 240 vp_dev->vdev.id.device = pci_dev->subsystem_device;
283 241
242 vp_dev->vdev.config = &virtio_pci_config_ops;
243
284 vp_dev->config_vector = vp_config_vector; 244 vp_dev->config_vector = vp_config_vector;
285 vp_dev->setup_vq = setup_vq; 245 vp_dev->setup_vq = setup_vq;
286 vp_dev->del_vq = del_vq; 246 vp_dev->del_vq = del_vq;
287 247
288 /* finally register the virtio device */
289 err = register_virtio_device(&vp_dev->vdev);
290 if (err)
291 goto out_set_drvdata;
292
293 return 0; 248 return 0;
294
295out_set_drvdata:
296 pci_iounmap(pci_dev, vp_dev->ioaddr);
297out_req_regions:
298 pci_release_regions(pci_dev);
299out_enable_device:
300 pci_disable_device(pci_dev);
301out:
302 kfree(vp_dev);
303 return err;
304} 249}
305 250
306void virtio_pci_legacy_remove(struct pci_dev *pci_dev) 251void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
307{ 252{
308 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); 253 struct pci_dev *pci_dev = vp_dev->pci_dev;
309
310 unregister_virtio_device(&vp_dev->vdev);
311 254
312 vp_del_vqs(&vp_dev->vdev);
313 pci_iounmap(pci_dev, vp_dev->ioaddr); 255 pci_iounmap(pci_dev, vp_dev->ioaddr);
314 pci_release_regions(pci_dev);
315 pci_disable_device(pci_dev);
316} 256}
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
new file mode 100644
index 000000000000..2aa38e59db2e
--- /dev/null
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -0,0 +1,695 @@
1/*
2 * Virtio PCI driver - modern (virtio 1.0) device support
3 *
4 * This module allows virtio devices to be used over a virtual PCI device.
5 * This can be used with QEMU based VMMs like KVM or Xen.
6 *
7 * Copyright IBM Corp. 2007
8 * Copyright Red Hat, Inc. 2014
9 *
10 * Authors:
11 * Anthony Liguori <aliguori@us.ibm.com>
12 * Rusty Russell <rusty@rustcorp.com.au>
13 * Michael S. Tsirkin <mst@redhat.com>
14 *
15 * This work is licensed under the terms of the GNU GPL, version 2 or later.
16 * See the COPYING file in the top-level directory.
17 *
18 */
19
20#define VIRTIO_PCI_NO_LEGACY
21#include "virtio_pci_common.h"
22
23static void __iomem *map_capability(struct pci_dev *dev, int off,
24 size_t minlen,
25 u32 align,
26 u32 start, u32 size,
27 size_t *len)
28{
29 u8 bar;
30 u32 offset, length;
31 void __iomem *p;
32
33 pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
34 bar),
35 &bar);
36 pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
37 &offset);
38 pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
39 &length);
40
41 if (length <= start) {
42 dev_err(&dev->dev,
43 "virtio_pci: bad capability len %u (>%u expected)\n",
44 length, start);
45 return NULL;
46 }
47
48 if (length - start < minlen) {
49 dev_err(&dev->dev,
50 "virtio_pci: bad capability len %u (>=%zu expected)\n",
51 length, minlen);
52 return NULL;
53 }
54
55 length -= start;
56
57 if (start + offset < offset) {
58 dev_err(&dev->dev,
59 "virtio_pci: map wrap-around %u+%u\n",
60 start, offset);
61 return NULL;
62 }
63
64 offset += start;
65
66 if (offset & (align - 1)) {
67 dev_err(&dev->dev,
68 "virtio_pci: offset %u not aligned to %u\n",
69 offset, align);
70 return NULL;
71 }
72
73 if (length > size)
74 length = size;
75
76 if (len)
77 *len = length;
78
79 if (minlen + offset < minlen ||
80 minlen + offset > pci_resource_len(dev, bar)) {
81 dev_err(&dev->dev,
82 "virtio_pci: map virtio %zu@%u "
83 "out of range on bar %i length %lu\n",
84 minlen, offset,
85 bar, (unsigned long)pci_resource_len(dev, bar));
86 return NULL;
87 }
88
89 p = pci_iomap_range(dev, bar, offset, length);
90 if (!p)
91 dev_err(&dev->dev,
92 "virtio_pci: unable to map virtio %u@%u on bar %i\n",
93 length, offset, bar);
94 return p;
95}
96
97static void iowrite64_twopart(u64 val, __le32 __iomem *lo, __le32 __iomem *hi)
98{
99 iowrite32((u32)val, lo);
100 iowrite32(val >> 32, hi);
101}
102
103/* virtio config->get_features() implementation */
104static u64 vp_get_features(struct virtio_device *vdev)
105{
106 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
107 u64 features;
108
109 iowrite32(0, &vp_dev->common->device_feature_select);
110 features = ioread32(&vp_dev->common->device_feature);
111 iowrite32(1, &vp_dev->common->device_feature_select);
112 features |= ((u64)ioread32(&vp_dev->common->device_feature) << 32);
113
114 return features;
115}
116
117/* virtio config->finalize_features() implementation */
118static int vp_finalize_features(struct virtio_device *vdev)
119{
120 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
121
122 /* Give virtio_ring a chance to accept features. */
123 vring_transport_features(vdev);
124
125 if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
126 dev_err(&vdev->dev, "virtio: device uses modern interface "
127 "but does not have VIRTIO_F_VERSION_1\n");
128 return -EINVAL;
129 }
130
131 iowrite32(0, &vp_dev->common->guest_feature_select);
132 iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
133 iowrite32(1, &vp_dev->common->guest_feature_select);
134 iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);
135
136 return 0;
137}
138
139/* virtio config->get() implementation */
140static void vp_get(struct virtio_device *vdev, unsigned offset,
141 void *buf, unsigned len)
142{
143 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
144 u8 b;
145 __le16 w;
146 __le32 l;
147
148 BUG_ON(offset + len > vp_dev->device_len);
149
150 switch (len) {
151 case 1:
152 b = ioread8(vp_dev->device + offset);
153 memcpy(buf, &b, sizeof b);
154 break;
155 case 2:
156 w = cpu_to_le16(ioread16(vp_dev->device + offset));
157 memcpy(buf, &w, sizeof w);
158 break;
159 case 4:
160 l = cpu_to_le32(ioread32(vp_dev->device + offset));
161 memcpy(buf, &l, sizeof l);
162 break;
163 case 8:
164 l = cpu_to_le32(ioread32(vp_dev->device + offset));
165 memcpy(buf, &l, sizeof l);
166 l = cpu_to_le32(ioread32(vp_dev->device + offset + sizeof l));
167 memcpy(buf + sizeof l, &l, sizeof l);
168 break;
169 default:
170 BUG();
171 }
172}
173
174/* the config->set() implementation. it's symmetric to the config->get()
175 * implementation */
176static void vp_set(struct virtio_device *vdev, unsigned offset,
177 const void *buf, unsigned len)
178{
179 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
180 u8 b;
181 __le16 w;
182 __le32 l;
183
184 BUG_ON(offset + len > vp_dev->device_len);
185
186 switch (len) {
187 case 1:
188 memcpy(&b, buf, sizeof b);
189 iowrite8(b, vp_dev->device + offset);
190 break;
191 case 2:
192 memcpy(&w, buf, sizeof w);
193 iowrite16(le16_to_cpu(w), vp_dev->device + offset);
194 break;
195 case 4:
196 memcpy(&l, buf, sizeof l);
197 iowrite32(le32_to_cpu(l), vp_dev->device + offset);
198 break;
199 case 8:
200 memcpy(&l, buf, sizeof l);
201 iowrite32(le32_to_cpu(l), vp_dev->device + offset);
202 memcpy(&l, buf + sizeof l, sizeof l);
203 iowrite32(le32_to_cpu(l), vp_dev->device + offset + sizeof l);
204 break;
205 default:
206 BUG();
207 }
208}
209
210static u32 vp_generation(struct virtio_device *vdev)
211{
212 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
213 return ioread8(&vp_dev->common->config_generation);
214}
215
216/* config->{get,set}_status() implementations */
217static u8 vp_get_status(struct virtio_device *vdev)
218{
219 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
220 return ioread8(&vp_dev->common->device_status);
221}
222
223static void vp_set_status(struct virtio_device *vdev, u8 status)
224{
225 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
226 /* We should never be setting status to 0. */
227 BUG_ON(status == 0);
228 iowrite8(status, &vp_dev->common->device_status);
229}
230
231static void vp_reset(struct virtio_device *vdev)
232{
233 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
234 /* 0 status means a reset. */
235 iowrite8(0, &vp_dev->common->device_status);
236 /* Flush out the status write, and flush in device writes,
237 * including MSI-X interrupts, if any. */
238 ioread8(&vp_dev->common->device_status);
239 /* Flush pending VQ/configuration callbacks. */
240 vp_synchronize_vectors(vdev);
241}
242
243static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
244{
245 /* Setup the vector used for configuration events */
246 iowrite16(vector, &vp_dev->common->msix_config);
247 /* Verify we had enough resources to assign the vector */
248 /* Will also flush the write out to device */
249 return ioread16(&vp_dev->common->msix_config);
250}
251
252static size_t vring_pci_size(u16 num)
253{
254 /* We only need a cacheline separation. */
255 return PAGE_ALIGN(vring_size(num, SMP_CACHE_BYTES));
256}
257
258static void *alloc_virtqueue_pages(int *num)
259{
260 void *pages;
261
262 /* TODO: allocate each queue chunk individually */
263 for (; *num && vring_pci_size(*num) > PAGE_SIZE; *num /= 2) {
264 pages = alloc_pages_exact(vring_pci_size(*num),
265 GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
266 if (pages)
267 return pages;
268 }
269
270 if (!*num)
271 return NULL;
272
273 /* Try to get a single page. You are my only hope! */
274 return alloc_pages_exact(vring_pci_size(*num), GFP_KERNEL|__GFP_ZERO);
275}
276
277static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
278 struct virtio_pci_vq_info *info,
279 unsigned index,
280 void (*callback)(struct virtqueue *vq),
281 const char *name,
282 u16 msix_vec)
283{
284 struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
285 struct virtqueue *vq;
286 u16 num, off;
287 int err;
288
289 if (index >= ioread16(&cfg->num_queues))
290 return ERR_PTR(-ENOENT);
291
292 /* Select the queue we're interested in */
293 iowrite16(index, &cfg->queue_select);
294
295 /* Check if queue is either not available or already active. */
296 num = ioread16(&cfg->queue_size);
297 if (!num || ioread16(&cfg->queue_enable))
298 return ERR_PTR(-ENOENT);
299
300 if (num & (num - 1)) {
301 dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
302 return ERR_PTR(-EINVAL);
303 }
304
305 /* get offset of notification word for this vq */
306 off = ioread16(&cfg->queue_notify_off);
307
308 info->num = num;
309 info->msix_vector = msix_vec;
310
311 info->queue = alloc_virtqueue_pages(&info->num);
312 if (info->queue == NULL)
313 return ERR_PTR(-ENOMEM);
314
315 /* create the vring */
316 vq = vring_new_virtqueue(index, info->num,
317 SMP_CACHE_BYTES, &vp_dev->vdev,
318 true, info->queue, vp_notify, callback, name);
319 if (!vq) {
320 err = -ENOMEM;
321 goto err_new_queue;
322 }
323
324 /* activate the queue */
325 iowrite16(num, &cfg->queue_size);
326 iowrite64_twopart(virt_to_phys(info->queue),
327 &cfg->queue_desc_lo, &cfg->queue_desc_hi);
328 iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)),
329 &cfg->queue_avail_lo, &cfg->queue_avail_hi);
330 iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)),
331 &cfg->queue_used_lo, &cfg->queue_used_hi);
332
333 if (vp_dev->notify_base) {
334 /* offset should not wrap */
335 if ((u64)off * vp_dev->notify_offset_multiplier + 2
336 > vp_dev->notify_len) {
337 dev_warn(&vp_dev->pci_dev->dev,
338 "bad notification offset %u (x %u) "
339 "for queue %u > %zd",
340 off, vp_dev->notify_offset_multiplier,
341 index, vp_dev->notify_len);
342 err = -EINVAL;
343 goto err_map_notify;
344 }
345 vq->priv = (void __force *)vp_dev->notify_base +
346 off * vp_dev->notify_offset_multiplier;
347 } else {
348 vq->priv = (void __force *)map_capability(vp_dev->pci_dev,
349 vp_dev->notify_map_cap, 2, 2,
350 off * vp_dev->notify_offset_multiplier, 2,
351 NULL);
352 }
353
354 if (!vq->priv) {
355 err = -ENOMEM;
356 goto err_map_notify;
357 }
358
359 if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
360 iowrite16(msix_vec, &cfg->queue_msix_vector);
361 msix_vec = ioread16(&cfg->queue_msix_vector);
362 if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
363 err = -EBUSY;
364 goto err_assign_vector;
365 }
366 }
367
368 return vq;
369
370err_assign_vector:
371 if (!vp_dev->notify_base)
372 pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv);
373err_map_notify:
374 vring_del_virtqueue(vq);
375err_new_queue:
376 free_pages_exact(info->queue, vring_pci_size(info->num));
377 return ERR_PTR(err);
378}
379
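
Editor's note: each queue's doorbell sits queue_notify_off * notify_off_multiplier bytes into the notify window; the bounds check above ensures the 2-byte register fits inside notify_len. With hypothetical values queue_notify_off = 3 and notify_off_multiplier = 4:

    void __iomem *db = vp_dev->notify_base + 3 * 4;   /* 12 bytes in */

    iowrite16(vq->index, db);       /* kick: tell the device to look */
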
380static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
381 struct virtqueue *vqs[],
382 vq_callback_t *callbacks[],
383 const char *names[])
384{
385 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
386 struct virtqueue *vq;
387 int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names);
388
389 if (rc)
390 return rc;
391
392 /* Select and activate all queues. Has to be done last: once we do
393 * this, there's no way to go back except reset.
394 */
395 list_for_each_entry(vq, &vdev->vqs, list) {
396 iowrite16(vq->index, &vp_dev->common->queue_select);
397 iowrite16(1, &vp_dev->common->queue_enable);
398 }
399
400 return 0;
401}
402
403static void del_vq(struct virtio_pci_vq_info *info)
404{
405 struct virtqueue *vq = info->vq;
406 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
407
408 iowrite16(vq->index, &vp_dev->common->queue_select);
409
410 if (vp_dev->msix_enabled) {
411 iowrite16(VIRTIO_MSI_NO_VECTOR,
412 &vp_dev->common->queue_msix_vector);
413 /* Flush the write out to device */
414 ioread16(&vp_dev->common->queue_msix_vector);
415 }
416
417 if (!vp_dev->notify_base)
418 pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv);
419
420 vring_del_virtqueue(vq);
421
422 free_pages_exact(info->queue, vring_pci_size(info->num));
423}
424
425static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
426 .get = NULL,
427 .set = NULL,
428 .generation = vp_generation,
429 .get_status = vp_get_status,
430 .set_status = vp_set_status,
431 .reset = vp_reset,
432 .find_vqs = vp_modern_find_vqs,
433 .del_vqs = vp_del_vqs,
434 .get_features = vp_get_features,
435 .finalize_features = vp_finalize_features,
436 .bus_name = vp_bus_name,
437 .set_vq_affinity = vp_set_vq_affinity,
438};
439
440static const struct virtio_config_ops virtio_pci_config_ops = {
441 .get = vp_get,
442 .set = vp_set,
443 .generation = vp_generation,
444 .get_status = vp_get_status,
445 .set_status = vp_set_status,
446 .reset = vp_reset,
447 .find_vqs = vp_modern_find_vqs,
448 .del_vqs = vp_del_vqs,
449 .get_features = vp_get_features,
450 .finalize_features = vp_finalize_features,
451 .bus_name = vp_bus_name,
452 .set_vq_affinity = vp_set_vq_affinity,
453};
454
455/**
456 * virtio_pci_find_capability - walk capabilities to find device info.
457 * @dev: the pci device
458 * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
459 * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
460 *
461 * Returns offset of the capability, or 0.
462 */
463static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
464 u32 ioresource_types)
465{
466 int pos;
467
468 for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
469 pos > 0;
470 pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
471 u8 type, bar;
472 pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
473 cfg_type),
474 &type);
475 pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
476 bar),
477 &bar);
478
479 /* Ignore structures with reserved BAR values */
480 if (bar > 0x5)
481 continue;
482
483 if (type == cfg_type) {
484 if (pci_resource_len(dev, bar) &&
485 pci_resource_flags(dev, bar) & ioresource_types)
486 return pos;
487 }
488 }
489 return 0;
490}
491
492/* This is part of the ABI. Don't screw with it. */
493static inline void check_offsets(void)
494{
495 /* Note: disk space was harmed in compilation of this function. */
496 BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
497 offsetof(struct virtio_pci_cap, cap_vndr));
498 BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
499 offsetof(struct virtio_pci_cap, cap_next));
500 BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
501 offsetof(struct virtio_pci_cap, cap_len));
502 BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
503 offsetof(struct virtio_pci_cap, cfg_type));
504 BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
505 offsetof(struct virtio_pci_cap, bar));
506 BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
507 offsetof(struct virtio_pci_cap, offset));
508 BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
509 offsetof(struct virtio_pci_cap, length));
510 BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
511 offsetof(struct virtio_pci_notify_cap,
512 notify_off_multiplier));
513 BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
514 offsetof(struct virtio_pci_common_cfg,
515 device_feature_select));
516 BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
517 offsetof(struct virtio_pci_common_cfg, device_feature));
518 BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
519 offsetof(struct virtio_pci_common_cfg,
520 guest_feature_select));
521 BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
522 offsetof(struct virtio_pci_common_cfg, guest_feature));
523 BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
524 offsetof(struct virtio_pci_common_cfg, msix_config));
525 BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
526 offsetof(struct virtio_pci_common_cfg, num_queues));
527 BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
528 offsetof(struct virtio_pci_common_cfg, device_status));
529 BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
530 offsetof(struct virtio_pci_common_cfg, config_generation));
531 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
532 offsetof(struct virtio_pci_common_cfg, queue_select));
533 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
534 offsetof(struct virtio_pci_common_cfg, queue_size));
535 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
536 offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
537 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
538 offsetof(struct virtio_pci_common_cfg, queue_enable));
539 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
540 offsetof(struct virtio_pci_common_cfg, queue_notify_off));
541 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
542 offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
543 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
544 offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
545 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
546 offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
547 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
548 offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
549 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
550 offsetof(struct virtio_pci_common_cfg, queue_used_lo));
551 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
552 offsetof(struct virtio_pci_common_cfg, queue_used_hi));
553}
554
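
Editor's note: check_offsets() pins the on-the-wire layout at compile time -- if a struct field ever drifts from its ABI constant, the build fails instead of the guest silently corrupting device state. The same trick with hypothetical names (BUILD_BUG_ON() must sit inside a function, as it does above):

    struct wire_hdr {
            __le32 magic;
            __le16 len;
    };
    #define WIRE_HDR_LEN_OFF 4      /* ABI: offset of 'len' */

    static inline void check_wire_hdr(void)
    {
            BUILD_BUG_ON(WIRE_HDR_LEN_OFF != offsetof(struct wire_hdr, len));
    }
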
555/* the PCI probing function */
556int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
557{
558 struct pci_dev *pci_dev = vp_dev->pci_dev;
559 int err, common, isr, notify, device;
560 u32 notify_length;
561 u32 notify_offset;
562
563 check_offsets();
564
565 /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
566 if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
567 return -ENODEV;
568
569 if (pci_dev->device < 0x1040) {
570 /* Transitional devices: use the PCI subsystem device id as
571 * virtio device id, same as legacy driver always did.
572 */
573 vp_dev->vdev.id.device = pci_dev->subsystem_device;
574 } else {
575 /* Modern devices: simply use PCI device id, but start from 0x1040. */
576 vp_dev->vdev.id.device = pci_dev->device - 0x1040;
577 }
578 vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
579
580 if (virtio_device_is_legacy_only(vp_dev->vdev.id))
581 return -ENODEV;
582
583 /* check for a common config: if not, use legacy mode (bar 0). */
584 common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
585 IORESOURCE_IO | IORESOURCE_MEM);
586 if (!common) {
587 dev_info(&pci_dev->dev,
588 "virtio_pci: leaving for legacy driver\n");
589 return -ENODEV;
590 }
591
592 /* If common is there, these should be too... */
593 isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
594 IORESOURCE_IO | IORESOURCE_MEM);
595 notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
596 IORESOURCE_IO | IORESOURCE_MEM);
597 if (!isr || !notify) {
598 dev_err(&pci_dev->dev,
599 "virtio_pci: missing capabilities %i/%i/%i\n",
600 common, isr, notify);
601 return -EINVAL;
602 }
603
604 /* Device capability is only mandatory for devices that have
605 * device-specific configuration.
606 */
607 device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
608 IORESOURCE_IO | IORESOURCE_MEM);
609
610 err = -EINVAL;
611 vp_dev->common = map_capability(pci_dev, common,
612 sizeof(struct virtio_pci_common_cfg), 4,
613 0, sizeof(struct virtio_pci_common_cfg),
614 NULL);
615 if (!vp_dev->common)
616 goto err_map_common;
617 vp_dev->isr = map_capability(pci_dev, isr, sizeof(u8), 1,
618 0, 1,
619 NULL);
620 if (!vp_dev->isr)
621 goto err_map_isr;
622
623 /* Read notify_off_multiplier from config space. */
624 pci_read_config_dword(pci_dev,
625 notify + offsetof(struct virtio_pci_notify_cap,
626 notify_off_multiplier),
627 &vp_dev->notify_offset_multiplier);
628 /* Read notify length and offset from config space. */
629 pci_read_config_dword(pci_dev,
630 notify + offsetof(struct virtio_pci_notify_cap,
631 cap.length),
632 &notify_length);
633
634 pci_read_config_dword(pci_dev,
635 notify + offsetof(struct virtio_pci_notify_cap,
636 cap.offset),
637 &notify_offset);
638
639 /* We don't know how many VQs we'll map ahead of time.
640 * If notify length is small, map it all now.
641 * Otherwise, map each VQ individually later.
642 */
643 if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
644 vp_dev->notify_base = map_capability(pci_dev, notify, 2, 2,
645 0, notify_length,
646 &vp_dev->notify_len);
647 if (!vp_dev->notify_base)
648 goto err_map_notify;
649 } else {
650 vp_dev->notify_map_cap = notify;
651 }
652
653 /* Again, we don't know how much we should map, but PAGE_SIZE
654 * is more than enough for all existing devices.
655 */
656 if (device) {
657 vp_dev->device = map_capability(pci_dev, device, 0, 4,
658 0, PAGE_SIZE,
659 &vp_dev->device_len);
660 if (!vp_dev->device)
661 goto err_map_device;
662
663 vp_dev->vdev.config = &virtio_pci_config_ops;
664 } else {
665 vp_dev->vdev.config = &virtio_pci_config_nodev_ops;
666 }
667
668 vp_dev->config_vector = vp_config_vector;
669 vp_dev->setup_vq = setup_vq;
670 vp_dev->del_vq = del_vq;
671
672 return 0;
673
674err_map_device:
675 if (vp_dev->notify_base)
676 pci_iounmap(pci_dev, vp_dev->notify_base);
677err_map_notify:
678 pci_iounmap(pci_dev, vp_dev->isr);
679err_map_isr:
680 pci_iounmap(pci_dev, vp_dev->common);
681err_map_common:
682 return err;
683}
684
685void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
686{
687 struct pci_dev *pci_dev = vp_dev->pci_dev;
688
689 if (vp_dev->device)
690 pci_iounmap(pci_dev, vp_dev->device);
691 if (vp_dev->notify_base)
692 pci_iounmap(pci_dev, vp_dev->notify_base);
693 pci_iounmap(pci_dev, vp_dev->isr);
694 pci_iounmap(pci_dev, vp_dev->common);
695}
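
The device-ID window logic in the probe above is easy to misread, so here is the same mapping as a standalone sketch (plain userspace C, illustrative only; the 0x1041 example assumes virtio-net's modern ID, i.e. 0x1040 plus device type 1):

#include <stdio.h>

/* Mirrors virtio_pci_modern_probe()'s ID mapping, for illustration. */
static int virtio_id_from_pci(unsigned int pci_id, unsigned int subsys_id)
{
	if (pci_id < 0x1000 || pci_id > 0x107f)
		return -1;		/* outside the virtio window: not ours */
	if (pci_id < 0x1040)
		return subsys_id;	/* transitional device: subsystem ID */
	return pci_id - 0x1040;		/* modern device: offset into window */
}

int main(void)
{
	printf("%d\n", virtio_id_from_pci(0x1041, 0));	/* prints 1 (net) */
	return 0;
}
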
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 00ec6b3f96b2..096b857e7b75 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -54,8 +54,7 @@
54#define END_USE(vq) 54#define END_USE(vq)
55#endif 55#endif
56 56
57struct vring_virtqueue 57struct vring_virtqueue {
58{
59 struct virtqueue vq; 58 struct virtqueue vq;
60 59
61 /* Actual memory layout for this queue */ 60 /* Actual memory layout for this queue */
@@ -245,14 +244,14 @@ static inline int virtqueue_add(struct virtqueue *_vq,
245 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1); 244 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1);
246 vq->num_added++; 245 vq->num_added++;
247 246
247 pr_debug("Added buffer head %i to %p\n", head, vq);
248 END_USE(vq);
249
248 /* This is very unlikely, but theoretically possible. Kick 250 /* This is very unlikely, but theoretically possible. Kick
249 * just in case. */ 251 * just in case. */
250 if (unlikely(vq->num_added == (1 << 16) - 1)) 252 if (unlikely(vq->num_added == (1 << 16) - 1))
251 virtqueue_kick(_vq); 253 virtqueue_kick(_vq);
252 254
253 pr_debug("Added buffer head %i to %p\n", head, vq);
254 END_USE(vq);
255
256 return 0; 255 return 0;
257} 256}
258 257
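
One step of reasoning the hunk above relies on but does not spell out: vring indices are 16-bit quantities, which is why virtqueue_add() kicks once num_added hits (1 << 16) - 1, before 16-bit arithmetic on the device side could see a wrapped, zero-looking delta. A minimal userspace demonstration of the wrap (assumes nothing beyond standard C):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t idx = (1 << 16) - 1;	/* the threshold checked above */

	idx++;				/* wraps to 0, as vring indices do */
	printf("%u\n", idx);		/* prints 0 */
	return 0;
}
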
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 08f41add1461..16f202350997 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -505,6 +505,16 @@ config MESON_WATCHDOG
505 To compile this driver as a module, choose M here: the 505 To compile this driver as a module, choose M here: the
506 module will be called meson_wdt. 506 module will be called meson_wdt.
507 507
508config MEDIATEK_WATCHDOG
509 tristate "Mediatek SoCs watchdog support"
510 depends on ARCH_MEDIATEK
511 select WATCHDOG_CORE
512 help
513 Say Y here to include support for the watchdog timer
514 in Mediatek SoCs.
515 To compile this driver as a module, choose M here: the
516 module will be called mtk_wdt.
517
508# AVR32 Architecture 518# AVR32 Architecture
509 519
510config AT32AP700X_WDT 520config AT32AP700X_WDT
@@ -1005,6 +1015,8 @@ config W83627HF_WDT
1005 NCT6775 1015 NCT6775
1006 NCT6776 1016 NCT6776
1007 NCT6779 1017 NCT6779
1018 NCT6791
1019 NCT6792
1008 1020
1009 This watchdog simply watches your kernel to make sure it doesn't 1021 This watchdog simply watches your kernel to make sure it doesn't
1010 freeze, and if it does, it reboots your computer after a certain 1022 freeze, and if it does, it reboots your computer after a certain
@@ -1101,7 +1113,7 @@ config ATH79_WDT
1101 1113
1102config BCM47XX_WDT 1114config BCM47XX_WDT
1103 tristate "Broadcom BCM47xx Watchdog Timer" 1115 tristate "Broadcom BCM47xx Watchdog Timer"
1104 depends on BCM47XX 1116 depends on BCM47XX || ARCH_BCM_5301X
1105 select WATCHDOG_CORE 1117 select WATCHDOG_CORE
1106 help 1118 help
1107 Hardware driver for the Broadcom BCM47xx Watchdog Timer. 1119 Hardware driver for the Broadcom BCM47xx Watchdog Timer.
@@ -1235,6 +1247,17 @@ config BCM_KONA_WDT_DEBUG
1235 1247
1236 If in doubt, say 'N'. 1248 If in doubt, say 'N'.
1237 1249
1250config IMGPDC_WDT
1251 tristate "Imagination Technologies PDC Watchdog Timer"
1252 depends on HAS_IOMEM
1253 depends on METAG || MIPS || COMPILE_TEST
1254 help
1255 Driver for Imagination Technologies PowerDown Controller
1256 Watchdog Timer.
1257
1258 To compile this driver as a loadable module, choose M here.
1259 The module will be called imgpdc_wdt.
1260
1238config LANTIQ_WDT 1261config LANTIQ_WDT
1239 tristate "Lantiq SoC watchdog" 1262 tristate "Lantiq SoC watchdog"
1240 depends on LANTIQ 1263 depends on LANTIQ
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index c569ec8f8a76..5c19294d1c30 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_QCOM_WDT) += qcom-wdt.o
63obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o 63obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o
64obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o 64obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o
65obj-$(CONFIG_MESON_WATCHDOG) += meson_wdt.o 65obj-$(CONFIG_MESON_WATCHDOG) += meson_wdt.o
66obj-$(CONFIG_MEDIATEK_WATCHDOG) += mtk_wdt.o
66 67
67# AVR32 Architecture 68# AVR32 Architecture
68obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o 69obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
@@ -142,6 +143,7 @@ obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o
142octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o 143octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o
143obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o 144obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o
144obj-$(CONFIG_RALINK_WDT) += rt2880_wdt.o 145obj-$(CONFIG_RALINK_WDT) += rt2880_wdt.o
146obj-$(CONFIG_IMGPDC_WDT) += imgpdc_wdt.o
145 147
146# PARISC Architecture 148# PARISC Architecture
147 149
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index 9816485f6825..b28a072abf78 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -169,6 +169,17 @@ static int bcm47xx_wdt_notify_sys(struct notifier_block *this,
169 return NOTIFY_DONE; 169 return NOTIFY_DONE;
170} 170}
171 171
172static int bcm47xx_wdt_restart(struct notifier_block *this, unsigned long mode,
173 void *cmd)
174{
175 struct bcm47xx_wdt *wdt;
176
177 wdt = container_of(this, struct bcm47xx_wdt, restart_handler);
178 wdt->timer_set(wdt, 1);
179
180 return NOTIFY_DONE;
181}
182
172static struct watchdog_ops bcm47xx_wdt_soft_ops = { 183static struct watchdog_ops bcm47xx_wdt_soft_ops = {
173 .owner = THIS_MODULE, 184 .owner = THIS_MODULE,
174 .start = bcm47xx_wdt_soft_start, 185 .start = bcm47xx_wdt_soft_start,
@@ -209,15 +220,23 @@ static int bcm47xx_wdt_probe(struct platform_device *pdev)
209 if (ret) 220 if (ret)
210 goto err_timer; 221 goto err_timer;
211 222
212 ret = watchdog_register_device(&wdt->wdd); 223 wdt->restart_handler.notifier_call = &bcm47xx_wdt_restart;
224 wdt->restart_handler.priority = 64;
225 ret = register_restart_handler(&wdt->restart_handler);
213 if (ret) 226 if (ret)
214 goto err_notifier; 227 goto err_notifier;
215 228
229 ret = watchdog_register_device(&wdt->wdd);
230 if (ret)
231 goto err_handler;
232
216 dev_info(&pdev->dev, "BCM47xx Watchdog Timer enabled (%d seconds%s%s)\n", 233 dev_info(&pdev->dev, "BCM47xx Watchdog Timer enabled (%d seconds%s%s)\n",
217 timeout, nowayout ? ", nowayout" : "", 234 timeout, nowayout ? ", nowayout" : "",
218 soft ? ", Software Timer" : ""); 235 soft ? ", Software Timer" : "");
219 return 0; 236 return 0;
220 237
238err_handler:
239 unregister_restart_handler(&wdt->restart_handler);
221err_notifier: 240err_notifier:
222 unregister_reboot_notifier(&wdt->notifier); 241 unregister_reboot_notifier(&wdt->notifier);
223err_timer: 242err_timer:
diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
index 2cd6b2c2dd2a..e2fe2ebdebd4 100644
--- a/drivers/watchdog/da9063_wdt.c
+++ b/drivers/watchdog/da9063_wdt.c
@@ -20,6 +20,7 @@
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/mfd/da9063/registers.h> 21#include <linux/mfd/da9063/registers.h>
22#include <linux/mfd/da9063/core.h> 22#include <linux/mfd/da9063/core.h>
23#include <linux/reboot.h>
23#include <linux/regmap.h> 24#include <linux/regmap.h>
24 25
25/* 26/*
@@ -38,6 +39,7 @@ static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 };
38struct da9063_watchdog { 39struct da9063_watchdog {
39 struct da9063 *da9063; 40 struct da9063 *da9063;
40 struct watchdog_device wdtdev; 41 struct watchdog_device wdtdev;
42 struct notifier_block restart_handler;
41}; 43};
42 44
43static unsigned int da9063_wdt_timeout_to_sel(unsigned int secs) 45static unsigned int da9063_wdt_timeout_to_sel(unsigned int secs)
@@ -119,6 +121,23 @@ static int da9063_wdt_set_timeout(struct watchdog_device *wdd,
119 return ret; 121 return ret;
120} 122}
121 123
124static int da9063_wdt_restart_handler(struct notifier_block *this,
125 unsigned long mode, void *cmd)
126{
127 struct da9063_watchdog *wdt = container_of(this,
128 struct da9063_watchdog,
129 restart_handler);
130 int ret;
131
132 ret = regmap_write(wdt->da9063->regmap, DA9063_REG_CONTROL_F,
133 DA9063_SHUTDOWN);
134 if (ret)
135 dev_alert(wdt->da9063->dev, "Failed to shutdown (err = %d)\n",
136 ret);
137
138 return NOTIFY_DONE;
139}
140
122static const struct watchdog_info da9063_watchdog_info = { 141static const struct watchdog_info da9063_watchdog_info = {
123 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, 142 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
124 .identity = "DA9063 Watchdog", 143 .identity = "DA9063 Watchdog",
@@ -163,14 +182,25 @@ static int da9063_wdt_probe(struct platform_device *pdev)
163 dev_set_drvdata(&pdev->dev, wdt); 182 dev_set_drvdata(&pdev->dev, wdt);
164 183
165 ret = watchdog_register_device(&wdt->wdtdev); 184 ret = watchdog_register_device(&wdt->wdtdev);
185 if (ret)
186 return ret;
166 187
167 return ret; 188 wdt->restart_handler.notifier_call = da9063_wdt_restart_handler;
189 wdt->restart_handler.priority = 128;
190 ret = register_restart_handler(&wdt->restart_handler);
191 if (ret)
192 dev_err(wdt->da9063->dev,
193 "Failed to register restart handler (err = %d)\n", ret);
194
195 return 0;
168} 196}
169 197
170static int da9063_wdt_remove(struct platform_device *pdev) 198static int da9063_wdt_remove(struct platform_device *pdev)
171{ 199{
172 struct da9063_watchdog *wdt = dev_get_drvdata(&pdev->dev); 200 struct da9063_watchdog *wdt = dev_get_drvdata(&pdev->dev);
173 201
202 unregister_restart_handler(&wdt->restart_handler);
203
174 watchdog_unregister_device(&wdt->wdtdev); 204 watchdog_unregister_device(&wdt->wdtdev);
175 205
176 return 0; 206 return 0;
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index b34a2e4e4e43..d0bb9499d12c 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -51,6 +51,8 @@
51/* The maximum TOP (timeout period) value that can be set in the watchdog. */ 51/* The maximum TOP (timeout period) value that can be set in the watchdog. */
52#define DW_WDT_MAX_TOP 15 52#define DW_WDT_MAX_TOP 15
53 53
54#define DW_WDT_DEFAULT_SECONDS 30
55
54static bool nowayout = WATCHDOG_NOWAYOUT; 56static bool nowayout = WATCHDOG_NOWAYOUT;
55module_param(nowayout, bool, 0); 57module_param(nowayout, bool, 0);
56MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " 58MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
@@ -96,6 +98,12 @@ static inline void dw_wdt_set_next_heartbeat(void)
96 dw_wdt.next_heartbeat = jiffies + dw_wdt_get_top() * HZ; 98 dw_wdt.next_heartbeat = jiffies + dw_wdt_get_top() * HZ;
97} 99}
98 100
101static void dw_wdt_keepalive(void)
102{
103 writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt.regs +
104 WDOG_COUNTER_RESTART_REG_OFFSET);
105}
106
99static int dw_wdt_set_top(unsigned top_s) 107static int dw_wdt_set_top(unsigned top_s)
100{ 108{
101 int i, top_val = DW_WDT_MAX_TOP; 109 int i, top_val = DW_WDT_MAX_TOP;
@@ -110,21 +118,27 @@ static int dw_wdt_set_top(unsigned top_s)
110 break; 118 break;
111 } 119 }
112 120
113 /* Set the new value in the watchdog. */ 121 /*
122 * Set the new value in the watchdog. Some versions of dw_wdt
123 * have TOPINIT in the TIMEOUT_RANGE register (as per
124 * CP_WDT_DUAL_TOP in WDT_COMP_PARAMS_1). On those we
125 * effectively get a pat of the watchdog right here.
126 */
114 writel(top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT, 127 writel(top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT,
115 dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET); 128 dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
116 129
130 /*
131 * Add an explicit pat to handle versions of the watchdog that
132 * don't have TOPINIT. This won't hurt on versions that have
133 * it.
134 */
135 dw_wdt_keepalive();
136
117 dw_wdt_set_next_heartbeat(); 137 dw_wdt_set_next_heartbeat();
118 138
119 return dw_wdt_top_in_seconds(top_val); 139 return dw_wdt_top_in_seconds(top_val);
120} 140}
121 141
122static void dw_wdt_keepalive(void)
123{
124 writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt.regs +
125 WDOG_COUNTER_RESTART_REG_OFFSET);
126}
127
128static int dw_wdt_restart_handle(struct notifier_block *this, 142static int dw_wdt_restart_handle(struct notifier_block *this,
129 unsigned long mode, void *cmd) 143 unsigned long mode, void *cmd)
130{ 144{
@@ -167,9 +181,9 @@ static int dw_wdt_open(struct inode *inode, struct file *filp)
167 if (!dw_wdt_is_enabled()) { 181 if (!dw_wdt_is_enabled()) {
168 /* 182 /*
169 * The watchdog is not currently enabled. Set the timeout to 183 * The watchdog is not currently enabled. Set the timeout to
170 * the maximum and then start it. 184 * something reasonable and then start it.
171 */ 185 */
172 dw_wdt_set_top(DW_WDT_MAX_TOP); 186 dw_wdt_set_top(DW_WDT_DEFAULT_SECONDS);
173 writel(WDOG_CONTROL_REG_WDT_EN_MASK, 187 writel(WDOG_CONTROL_REG_WDT_EN_MASK,
174 dw_wdt.regs + WDOG_CONTROL_REG_OFFSET); 188 dw_wdt.regs + WDOG_CONTROL_REG_OFFSET);
175 } 189 }
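
For context on why DW_WDT_DEFAULT_SECONDS replaces DW_WDT_MAX_TOP at open time: the DesignWare watchdog maps a TOP value t to 2^(16 + t) input-clock cycles, the relation the driver's dw_wdt_top_in_seconds() helper encodes. A quick userspace check of that arithmetic (the 50 MHz clock is an assumption for the example):

#include <stdio.h>

static unsigned int top_in_seconds(unsigned int top, unsigned long clk_hz)
{
	return (1UL << (16 + top)) / clk_hz;	/* 2^(16+top) cycles */
}

int main(void)
{
	/* TOP=15 is 2^31 cycles: ~42 s at 50 MHz, so the old code started
	 * the watchdog well above the new 30 s default. */
	printf("%u\n", top_in_seconds(15, 50000000UL));
	return 0;
}
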
diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c
index bbdb19b45332..cbc313d37c59 100644
--- a/drivers/watchdog/gpio_wdt.c
+++ b/drivers/watchdog/gpio_wdt.c
@@ -31,6 +31,8 @@ struct gpio_wdt_priv {
31 int gpio; 31 int gpio;
32 bool active_low; 32 bool active_low;
33 bool state; 33 bool state;
34 bool always_running;
35 bool armed;
34 unsigned int hw_algo; 36 unsigned int hw_algo;
35 unsigned int hw_margin; 37 unsigned int hw_margin;
36 unsigned long last_jiffies; 38 unsigned long last_jiffies;
@@ -48,14 +50,20 @@ static void gpio_wdt_disable(struct gpio_wdt_priv *priv)
48 gpio_direction_input(priv->gpio); 50 gpio_direction_input(priv->gpio);
49} 51}
50 52
51static int gpio_wdt_start(struct watchdog_device *wdd) 53static void gpio_wdt_start_impl(struct gpio_wdt_priv *priv)
52{ 54{
53 struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
54
55 priv->state = priv->active_low; 55 priv->state = priv->active_low;
56 gpio_direction_output(priv->gpio, priv->state); 56 gpio_direction_output(priv->gpio, priv->state);
57 priv->last_jiffies = jiffies; 57 priv->last_jiffies = jiffies;
58 mod_timer(&priv->timer, priv->last_jiffies + priv->hw_margin); 58 mod_timer(&priv->timer, priv->last_jiffies + priv->hw_margin);
59}
60
61static int gpio_wdt_start(struct watchdog_device *wdd)
62{
63 struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
64
65 gpio_wdt_start_impl(priv);
66 priv->armed = true;
59 67
60 return 0; 68 return 0;
61} 69}
@@ -64,8 +72,11 @@ static int gpio_wdt_stop(struct watchdog_device *wdd)
64{ 72{
65 struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd); 73 struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
66 74
67 mod_timer(&priv->timer, 0); 75 priv->armed = false;
68 gpio_wdt_disable(priv); 76 if (!priv->always_running) {
77 mod_timer(&priv->timer, 0);
78 gpio_wdt_disable(priv);
79 }
69 80
70 return 0; 81 return 0;
71} 82}
@@ -91,8 +102,8 @@ static void gpio_wdt_hwping(unsigned long data)
91 struct watchdog_device *wdd = (struct watchdog_device *)data; 102 struct watchdog_device *wdd = (struct watchdog_device *)data;
92 struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd); 103 struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
93 104
94 if (time_after(jiffies, priv->last_jiffies + 105 if (priv->armed && time_after(jiffies, priv->last_jiffies +
95 msecs_to_jiffies(wdd->timeout * 1000))) { 106 msecs_to_jiffies(wdd->timeout * 1000))) {
96 dev_crit(wdd->dev, "Timer expired. System will reboot soon!\n"); 107 dev_crit(wdd->dev, "Timer expired. System will reboot soon!\n");
97 return; 108 return;
98 } 109 }
@@ -197,6 +208,9 @@ static int gpio_wdt_probe(struct platform_device *pdev)
197 /* Use safe value (1/2 of real timeout) */ 208 /* Use safe value (1/2 of real timeout) */
198 priv->hw_margin = msecs_to_jiffies(hw_margin / 2); 209 priv->hw_margin = msecs_to_jiffies(hw_margin / 2);
199 210
211 priv->always_running = of_property_read_bool(pdev->dev.of_node,
212 "always-running");
213
200 watchdog_set_drvdata(&priv->wdd, priv); 214 watchdog_set_drvdata(&priv->wdd, priv);
201 215
202 priv->wdd.info = &gpio_wdt_ident; 216 priv->wdd.info = &gpio_wdt_ident;
@@ -216,8 +230,15 @@ static int gpio_wdt_probe(struct platform_device *pdev)
216 priv->notifier.notifier_call = gpio_wdt_notify_sys; 230 priv->notifier.notifier_call = gpio_wdt_notify_sys;
217 ret = register_reboot_notifier(&priv->notifier); 231 ret = register_reboot_notifier(&priv->notifier);
218 if (ret) 232 if (ret)
219 watchdog_unregister_device(&priv->wdd); 233 goto error_unregister;
220 234
235 if (priv->always_running)
236 gpio_wdt_start_impl(priv);
237
238 return 0;
239
240error_unregister:
241 watchdog_unregister_device(&priv->wdd);
221 return ret; 242 return ret;
222} 243}
223 244
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 75d2243b94f5..ada3e44f9932 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -745,7 +745,7 @@ static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
745 745
746 dev_info(&dev->dev, 746 dev_info(&dev->dev,
747 "HP Watchdog Timer Driver: NMI decoding initialized" 747 "HP Watchdog Timer Driver: NMI decoding initialized"
748 ", allow kernel dump: %s (default = 0/OFF)\n", 748 ", allow kernel dump: %s (default = 1/ON)\n",
749 (allow_kdump == 0) ? "OFF" : "ON"); 749 (allow_kdump == 0) ? "OFF" : "ON");
750 return 0; 750 return 0;
751 751
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c
new file mode 100644
index 000000000000..c8def68d9e4c
--- /dev/null
+++ b/drivers/watchdog/imgpdc_wdt.c
@@ -0,0 +1,289 @@
1/*
2 * Imagination Technologies PowerDown Controller Watchdog Timer.
3 *
4 * Copyright (c) 2014 Imagination Technologies Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 *
10 * Based on drivers/watchdog/sunxi_wdt.c Copyright (c) 2013 Carlo Caione
11 * 2012 Henrik Nordstrom
12 */
13
14#include <linux/clk.h>
15#include <linux/io.h>
16#include <linux/log2.h>
17#include <linux/module.h>
18#include <linux/platform_device.h>
19#include <linux/slab.h>
20#include <linux/watchdog.h>
21
22/* registers */
23#define PDC_WDT_SOFT_RESET 0x00
24#define PDC_WDT_CONFIG 0x04
25 #define PDC_WDT_CONFIG_ENABLE BIT(31)
26 #define PDC_WDT_CONFIG_DELAY_MASK 0x1f
27
28#define PDC_WDT_TICKLE1 0x08
29#define PDC_WDT_TICKLE1_MAGIC 0xabcd1234
30#define PDC_WDT_TICKLE2 0x0c
31#define PDC_WDT_TICKLE2_MAGIC 0x4321dcba
32
33#define PDC_WDT_TICKLE_STATUS_MASK 0x7
34#define PDC_WDT_TICKLE_STATUS_SHIFT 0
35#define PDC_WDT_TICKLE_STATUS_HRESET 0x0 /* Hard reset */
36#define PDC_WDT_TICKLE_STATUS_TIMEOUT 0x1 /* Timeout */
37#define PDC_WDT_TICKLE_STATUS_TICKLE 0x2 /* Tickled incorrectly */
38#define PDC_WDT_TICKLE_STATUS_SRESET 0x3 /* Soft reset */
39#define PDC_WDT_TICKLE_STATUS_USER 0x4 /* User reset */
40
41/* Timeout values are in seconds */
42#define PDC_WDT_MIN_TIMEOUT 1
43#define PDC_WDT_DEF_TIMEOUT 64
44
45static int heartbeat;
46module_param(heartbeat, int, 0);
47MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. "
48 "(default = " __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
49
50static bool nowayout = WATCHDOG_NOWAYOUT;
51module_param(nowayout, bool, 0);
52MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
53 "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
54
55struct pdc_wdt_dev {
56 struct watchdog_device wdt_dev;
57 struct clk *wdt_clk;
58 struct clk *sys_clk;
59 void __iomem *base;
60};
61
62static int pdc_wdt_keepalive(struct watchdog_device *wdt_dev)
63{
64 struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev);
65
66 writel(PDC_WDT_TICKLE1_MAGIC, wdt->base + PDC_WDT_TICKLE1);
67 writel(PDC_WDT_TICKLE2_MAGIC, wdt->base + PDC_WDT_TICKLE2);
68
69 return 0;
70}
71
72static int pdc_wdt_stop(struct watchdog_device *wdt_dev)
73{
74 unsigned int val;
75 struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev);
76
77 val = readl(wdt->base + PDC_WDT_CONFIG);
78 val &= ~PDC_WDT_CONFIG_ENABLE;
79 writel(val, wdt->base + PDC_WDT_CONFIG);
80
81 /* Must tickle to finish the stop */
82 pdc_wdt_keepalive(wdt_dev);
83
84 return 0;
85}
86
87static int pdc_wdt_set_timeout(struct watchdog_device *wdt_dev,
88 unsigned int new_timeout)
89{
90 unsigned int val;
91 struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev);
92 unsigned long clk_rate = clk_get_rate(wdt->wdt_clk);
93
94 wdt->wdt_dev.timeout = new_timeout;
95
96 val = readl(wdt->base + PDC_WDT_CONFIG) & ~PDC_WDT_CONFIG_DELAY_MASK;
97 val |= order_base_2(new_timeout * clk_rate) - 1;
98 writel(val, wdt->base + PDC_WDT_CONFIG);
99
100 return 0;
101}
102
103/* Start the watchdog timer (delay should already be set) */
104static int pdc_wdt_start(struct watchdog_device *wdt_dev)
105{
106 unsigned int val;
107 struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev);
108
109 val = readl(wdt->base + PDC_WDT_CONFIG);
110 val |= PDC_WDT_CONFIG_ENABLE;
111 writel(val, wdt->base + PDC_WDT_CONFIG);
112
113 return 0;
114}
115
116static struct watchdog_info pdc_wdt_info = {
117 .identity = "IMG PDC Watchdog",
118 .options = WDIOF_SETTIMEOUT |
119 WDIOF_KEEPALIVEPING |
120 WDIOF_MAGICCLOSE,
121};
122
123static const struct watchdog_ops pdc_wdt_ops = {
124 .owner = THIS_MODULE,
125 .start = pdc_wdt_start,
126 .stop = pdc_wdt_stop,
127 .ping = pdc_wdt_keepalive,
128 .set_timeout = pdc_wdt_set_timeout,
129};
130
131static int pdc_wdt_probe(struct platform_device *pdev)
132{
133 int ret, val;
134 unsigned long clk_rate;
135 struct resource *res;
136 struct pdc_wdt_dev *pdc_wdt;
137
138 pdc_wdt = devm_kzalloc(&pdev->dev, sizeof(*pdc_wdt), GFP_KERNEL);
139 if (!pdc_wdt)
140 return -ENOMEM;
141
142 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
143 pdc_wdt->base = devm_ioremap_resource(&pdev->dev, res);
144 if (IS_ERR(pdc_wdt->base))
145 return PTR_ERR(pdc_wdt->base);
146
147 pdc_wdt->sys_clk = devm_clk_get(&pdev->dev, "sys");
148 if (IS_ERR(pdc_wdt->sys_clk)) {
149 dev_err(&pdev->dev, "failed to get the sys clock\n");
150 return PTR_ERR(pdc_wdt->sys_clk);
151 }
152
153 pdc_wdt->wdt_clk = devm_clk_get(&pdev->dev, "wdt");
154 if (IS_ERR(pdc_wdt->wdt_clk)) {
155 dev_err(&pdev->dev, "failed to get the wdt clock\n");
156 return PTR_ERR(pdc_wdt->wdt_clk);
157 }
158
159 ret = clk_prepare_enable(pdc_wdt->sys_clk);
160 if (ret) {
161 dev_err(&pdev->dev, "could not prepare or enable sys clock\n");
162 return ret;
163 }
164
165 ret = clk_prepare_enable(pdc_wdt->wdt_clk);
166 if (ret) {
167 dev_err(&pdev->dev, "could not prepare or enable wdt clock\n");
168 goto disable_sys_clk;
169 }
170
171 /* We use the clock rate to calculate the max timeout */
172 clk_rate = clk_get_rate(pdc_wdt->wdt_clk);
173 if (clk_rate == 0) {
174 dev_err(&pdev->dev, "failed to get clock rate\n");
175 ret = -EINVAL;
176 goto disable_wdt_clk;
177 }
178
179 if (order_base_2(clk_rate) > PDC_WDT_CONFIG_DELAY_MASK + 1) {
180 dev_err(&pdev->dev, "invalid clock rate\n");
181 ret = -EINVAL;
182 goto disable_wdt_clk;
183 }
184
185 if (order_base_2(clk_rate) == 0)
186 pdc_wdt->wdt_dev.min_timeout = PDC_WDT_MIN_TIMEOUT + 1;
187 else
188 pdc_wdt->wdt_dev.min_timeout = PDC_WDT_MIN_TIMEOUT;
189
190 pdc_wdt->wdt_dev.info = &pdc_wdt_info;
191 pdc_wdt->wdt_dev.ops = &pdc_wdt_ops;
192 pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK;
193 pdc_wdt->wdt_dev.parent = &pdev->dev;
194
195 ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev);
196 if (ret < 0) {
197 pdc_wdt->wdt_dev.timeout = pdc_wdt->wdt_dev.max_timeout;
198 dev_warn(&pdev->dev,
199 "Initial timeout out of range! setting max timeout\n");
200 }
201
202 pdc_wdt_stop(&pdc_wdt->wdt_dev);
203
204 /* Find what caused the last reset */
205 val = readl(pdc_wdt->base + PDC_WDT_TICKLE1);
206 val = (val & PDC_WDT_TICKLE_STATUS_MASK) >> PDC_WDT_TICKLE_STATUS_SHIFT;
207 switch (val) {
208 case PDC_WDT_TICKLE_STATUS_TICKLE:
209 case PDC_WDT_TICKLE_STATUS_TIMEOUT:
210 pdc_wdt->wdt_dev.bootstatus |= WDIOF_CARDRESET;
211 dev_info(&pdev->dev,
212 "watchdog module last reset due to timeout\n");
213 break;
214 case PDC_WDT_TICKLE_STATUS_HRESET:
215 dev_info(&pdev->dev,
216 "watchdog module last reset due to hard reset\n");
217 break;
218 case PDC_WDT_TICKLE_STATUS_SRESET:
219 dev_info(&pdev->dev,
220 "watchdog module last reset due to soft reset\n");
221 break;
222 case PDC_WDT_TICKLE_STATUS_USER:
223 dev_info(&pdev->dev,
224 "watchdog module last reset due to user reset\n");
225 break;
226 default:
227 dev_info(&pdev->dev,
228 "contains an illegal status code (%08x)\n", val);
229 break;
230 }
231
232 watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout);
233
234 platform_set_drvdata(pdev, pdc_wdt);
235 watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
236
237 ret = watchdog_register_device(&pdc_wdt->wdt_dev);
238 if (ret)
239 goto disable_wdt_clk;
240
241 return 0;
242
243disable_wdt_clk:
244 clk_disable_unprepare(pdc_wdt->wdt_clk);
245disable_sys_clk:
246 clk_disable_unprepare(pdc_wdt->sys_clk);
247 return ret;
248}
249
250static void pdc_wdt_shutdown(struct platform_device *pdev)
251{
252 struct pdc_wdt_dev *pdc_wdt = platform_get_drvdata(pdev);
253
254 pdc_wdt_stop(&pdc_wdt->wdt_dev);
255}
256
257static int pdc_wdt_remove(struct platform_device *pdev)
258{
259 struct pdc_wdt_dev *pdc_wdt = platform_get_drvdata(pdev);
260
261 pdc_wdt_stop(&pdc_wdt->wdt_dev);
262 watchdog_unregister_device(&pdc_wdt->wdt_dev);
263 clk_disable_unprepare(pdc_wdt->wdt_clk);
264 clk_disable_unprepare(pdc_wdt->sys_clk);
265
266 return 0;
267}
268
269static const struct of_device_id pdc_wdt_match[] = {
270 { .compatible = "img,pdc-wdt" },
271 {}
272};
273MODULE_DEVICE_TABLE(of, pdc_wdt_match);
274
275static struct platform_driver pdc_wdt_driver = {
276 .driver = {
277 .name = "imgpdc-wdt",
278 .of_match_table = pdc_wdt_match,
279 },
280 .probe = pdc_wdt_probe,
281 .remove = pdc_wdt_remove,
282 .shutdown = pdc_wdt_shutdown,
283};
284module_platform_driver(pdc_wdt_driver);
285
286MODULE_AUTHOR("Jude Abraham <Jude.Abraham@imgtec.com>");
287MODULE_AUTHOR("Naidu Tellapati <Naidu.Tellapati@imgtec.com>");
288MODULE_DESCRIPTION("Imagination Technologies PDC Watchdog Timer Driver");
289MODULE_LICENSE("GPL v2");
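
The delay-field encoding in pdc_wdt_set_timeout() above is terse: the field stores order_base_2(timeout * clk_rate) - 1. A worked userspace check (the 32768 Hz clock is an assumed example rate; order_base_2() is re-implemented here to match the kernel's round-up-to-power-of-two semantics):

#include <stdio.h>

/* order_base_2(n): log2 of n, rounded up to the next power of two. */
static unsigned int order_base_2(unsigned long n)
{
	unsigned int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned long clk_hz = 32768;	/* assumed wdt clock rate */
	unsigned int timeout = 64;	/* PDC_WDT_DEF_TIMEOUT */

	/* 64 s * 32768 Hz = 2^21 counts, so the field holds 21 - 1 = 20 */
	printf("%u\n", order_base_2(timeout * clk_hz) - 1);
	return 0;
}
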
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 5142bbabe027..5e6d808d358a 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -205,7 +205,7 @@ static inline void imx2_wdt_ping_if_active(struct watchdog_device *wdog)
205 } 205 }
206} 206}
207 207
208static struct watchdog_ops imx2_wdt_ops = { 208static const struct watchdog_ops imx2_wdt_ops = {
209 .owner = THIS_MODULE, 209 .owner = THIS_MODULE,
210 .start = imx2_wdt_start, 210 .start = imx2_wdt_start,
211 .stop = imx2_wdt_stop, 211 .stop = imx2_wdt_stop,
@@ -213,7 +213,7 @@ static struct watchdog_ops imx2_wdt_ops = {
213 .set_timeout = imx2_wdt_set_timeout, 213 .set_timeout = imx2_wdt_set_timeout,
214}; 214};
215 215
216static struct regmap_config imx2_wdt_regmap_config = { 216static const struct regmap_config imx2_wdt_regmap_config = {
217 .reg_bits = 16, 217 .reg_bits = 16,
218 .reg_stride = 2, 218 .reg_stride = 2,
219 .val_bits = 16, 219 .val_bits = 16,
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index 0b93739c0106..e54839b12650 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -12,8 +12,8 @@
12 * http://www.ite.com.tw/ 12 * http://www.ite.com.tw/
13 * 13 *
14 * Support of the watchdog timers, which are available on 14 * Support of the watchdog timers, which are available on
15 * IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726 15 * IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726,
16 * and IT8728. 16 * IT8728 and IT8783.
17 * 17 *
18 * This program is free software; you can redistribute it and/or 18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License 19 * modify it under the terms of the GNU General Public License
@@ -87,6 +87,7 @@
87#define IT8721_ID 0x8721 87#define IT8721_ID 0x8721
88#define IT8726_ID 0x8726 /* the data sheet suggest wrongly 0x8716 */ 88#define IT8726_ID 0x8726 /* the data sheet suggest wrongly 0x8716 */
89#define IT8728_ID 0x8728 89#define IT8728_ID 0x8728
90#define IT8783_ID 0x8783
90 91
91/* GPIO Configuration Registers LDN=0x07 */ 92/* GPIO Configuration Registers LDN=0x07 */
92#define WDTCTRL 0x71 93#define WDTCTRL 0x71
@@ -633,6 +634,7 @@ static int __init it87_wdt_init(void)
633 case IT8720_ID: 634 case IT8720_ID:
634 case IT8721_ID: 635 case IT8721_ID:
635 case IT8728_ID: 636 case IT8728_ID:
637 case IT8783_ID:
636 max_units = 65535; 638 max_units = 65535;
637 try_gameport = 0; 639 try_gameport = 0;
638 break; 640 break;
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index 18e41afa4da3..4c2cc09c0c57 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -24,6 +24,7 @@
24#include <linux/clk.h> 24#include <linux/clk.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/err.h> 26#include <linux/err.h>
27#include <linux/of.h>
27 28
28#include <asm/mach-jz4740/timer.h> 29#include <asm/mach-jz4740/timer.h>
29 30
@@ -142,6 +143,14 @@ static const struct watchdog_ops jz4740_wdt_ops = {
142 .set_timeout = jz4740_wdt_set_timeout, 143 .set_timeout = jz4740_wdt_set_timeout,
143}; 144};
144 145
146#ifdef CONFIG_OF
147static const struct of_device_id jz4740_wdt_of_matches[] = {
148 { .compatible = "ingenic,jz4740-watchdog", },
149 { /* sentinel */ }
150};
151MODULE_DEVICE_TABLE(of, jz4740_wdt_of_matches);
152#endif
153
145static int jz4740_wdt_probe(struct platform_device *pdev) 154static int jz4740_wdt_probe(struct platform_device *pdev)
146{ 155{
147 struct jz4740_wdt_drvdata *drvdata; 156 struct jz4740_wdt_drvdata *drvdata;
@@ -211,6 +220,7 @@ static struct platform_driver jz4740_wdt_driver = {
211 .remove = jz4740_wdt_remove, 220 .remove = jz4740_wdt_remove,
212 .driver = { 221 .driver = {
213 .name = "jz4740-wdt", 222 .name = "jz4740-wdt",
223 .of_match_table = of_match_ptr(jz4740_wdt_of_matches),
214 }, 224 },
215}; 225};
216 226
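
The #ifdef CONFIG_OF block plus of_match_ptr() above is the stock idiom for keeping a platform driver buildable without devicetree: of_match_ptr(x) evaluates to x when CONFIG_OF is set and to NULL otherwise, so no reference to the unbuilt table survives. A minimal sketch of the idiom with a hypothetical device:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

#ifdef CONFIG_OF
static const struct of_device_id foo_of_matches[] = {	/* hypothetical */
	{ .compatible = "vendor,foo", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_of_matches);
#endif

/*
 * In the platform_driver definition:
 *	.of_match_table = of_match_ptr(foo_of_matches),
 * which compiles to NULL on !CONFIG_OF kernels instead of referencing
 * a table that was never built.
 */
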
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
new file mode 100644
index 000000000000..a87f6df6e85f
--- /dev/null
+++ b/drivers/watchdog/mtk_wdt.c
@@ -0,0 +1,251 @@
1/*
2 * Mediatek Watchdog Driver
3 *
4 * Copyright (C) 2014 Matthias Brugger
5 *
6 * Matthias Brugger <matthias.bgg@gmail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * Based on sunxi_wdt.c
19 */
20
21#include <linux/err.h>
22#include <linux/init.h>
23#include <linux/io.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/of.h>
28#include <linux/platform_device.h>
29#include <linux/types.h>
30#include <linux/watchdog.h>
31#include <linux/notifier.h>
32#include <linux/reboot.h>
33#include <linux/delay.h>
34
35#define WDT_MAX_TIMEOUT 31
36#define WDT_MIN_TIMEOUT 1
37#define WDT_LENGTH_TIMEOUT(n) ((n) << 5)
38
39#define WDT_LENGTH 0x04
40#define WDT_LENGTH_KEY 0x8
41
42#define WDT_RST 0x08
43#define WDT_RST_RELOAD 0x1971
44
45#define WDT_MODE 0x00
46#define WDT_MODE_EN (1 << 0)
47#define WDT_MODE_EXT_POL_LOW (0 << 1)
48#define WDT_MODE_EXT_POL_HIGH (1 << 1)
49#define WDT_MODE_EXRST_EN (1 << 2)
50#define WDT_MODE_IRQ_EN (1 << 3)
51#define WDT_MODE_AUTO_START (1 << 4)
52#define WDT_MODE_DUAL_EN (1 << 6)
53#define WDT_MODE_KEY 0x22000000
54
55#define WDT_SWRST 0x14
56#define WDT_SWRST_KEY 0x1209
57
58#define DRV_NAME "mtk-wdt"
59#define DRV_VERSION "1.0"
60
61static bool nowayout = WATCHDOG_NOWAYOUT;
62static unsigned int timeout = WDT_MAX_TIMEOUT;
63
64struct mtk_wdt_dev {
65 struct watchdog_device wdt_dev;
66 void __iomem *wdt_base;
67 struct notifier_block restart_handler;
68};
69
70static int mtk_reset_handler(struct notifier_block *this, unsigned long mode,
71 void *cmd)
72{
73 struct mtk_wdt_dev *mtk_wdt;
74 void __iomem *wdt_base;
75
76 mtk_wdt = container_of(this, struct mtk_wdt_dev, restart_handler);
77 wdt_base = mtk_wdt->wdt_base;
78
79 while (1) {
80 writel(WDT_SWRST_KEY, wdt_base + WDT_SWRST);
81 mdelay(5);
82 }
83
84 return NOTIFY_DONE;
85}
86
87static int mtk_wdt_ping(struct watchdog_device *wdt_dev)
88{
89 struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
90 void __iomem *wdt_base = mtk_wdt->wdt_base;
91
92 iowrite32(WDT_RST_RELOAD, wdt_base + WDT_RST);
93
94 return 0;
95}
96
97static int mtk_wdt_set_timeout(struct watchdog_device *wdt_dev,
98 unsigned int timeout)
99{
100 struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
101 void __iomem *wdt_base = mtk_wdt->wdt_base;
102 u32 reg;
103
104 wdt_dev->timeout = timeout;
105
106 /*
107 * Each unit of the length field is 512 ticks of the 32 kHz clock,
108 * so one second corresponds to 64 units (hence timeout << 6).
109 */
110 reg = WDT_LENGTH_TIMEOUT(timeout << 6) | WDT_LENGTH_KEY;
111 iowrite32(reg, wdt_base + WDT_LENGTH);
112
113 mtk_wdt_ping(wdt_dev);
114
115 return 0;
116}
117
118static int mtk_wdt_stop(struct watchdog_device *wdt_dev)
119{
120 struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
121 void __iomem *wdt_base = mtk_wdt->wdt_base;
122 u32 reg;
123
124 reg = readl(wdt_base + WDT_MODE);
125 reg &= ~WDT_MODE_EN;
126 iowrite32(reg, wdt_base + WDT_MODE);
127
128 return 0;
129}
130
131static int mtk_wdt_start(struct watchdog_device *wdt_dev)
132{
133 u32 reg;
134 struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
135 void __iomem *wdt_base = mtk_wdt->wdt_base;
136 int ret;
137
138 ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
139 if (ret < 0)
140 return ret;
141
142 reg = ioread32(wdt_base + WDT_MODE);
143 reg &= ~(WDT_MODE_IRQ_EN | WDT_MODE_DUAL_EN);
144 reg |= (WDT_MODE_EN | WDT_MODE_KEY);
145 iowrite32(reg, wdt_base + WDT_MODE);
146
147 return 0;
148}
149
150static const struct watchdog_info mtk_wdt_info = {
151 .identity = DRV_NAME,
152 .options = WDIOF_SETTIMEOUT |
153 WDIOF_KEEPALIVEPING |
154 WDIOF_MAGICCLOSE,
155};
156
157static const struct watchdog_ops mtk_wdt_ops = {
158 .owner = THIS_MODULE,
159 .start = mtk_wdt_start,
160 .stop = mtk_wdt_stop,
161 .ping = mtk_wdt_ping,
162 .set_timeout = mtk_wdt_set_timeout,
163};
164
165static int mtk_wdt_probe(struct platform_device *pdev)
166{
167 struct mtk_wdt_dev *mtk_wdt;
168 struct resource *res;
169 int err;
170
171 mtk_wdt = devm_kzalloc(&pdev->dev, sizeof(*mtk_wdt), GFP_KERNEL);
172 if (!mtk_wdt)
173 return -ENOMEM;
174
175 platform_set_drvdata(pdev, mtk_wdt);
176
177 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
178 mtk_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res);
179 if (IS_ERR(mtk_wdt->wdt_base))
180 return PTR_ERR(mtk_wdt->wdt_base);
181
182 mtk_wdt->wdt_dev.info = &mtk_wdt_info;
183 mtk_wdt->wdt_dev.ops = &mtk_wdt_ops;
184 mtk_wdt->wdt_dev.timeout = WDT_MAX_TIMEOUT;
185 mtk_wdt->wdt_dev.max_timeout = WDT_MAX_TIMEOUT;
186 mtk_wdt->wdt_dev.min_timeout = WDT_MIN_TIMEOUT;
187 mtk_wdt->wdt_dev.parent = &pdev->dev;
188
189 watchdog_init_timeout(&mtk_wdt->wdt_dev, timeout, &pdev->dev);
190 watchdog_set_nowayout(&mtk_wdt->wdt_dev, nowayout);
191
192 watchdog_set_drvdata(&mtk_wdt->wdt_dev, mtk_wdt);
193
194 mtk_wdt_stop(&mtk_wdt->wdt_dev);
195
196 err = watchdog_register_device(&mtk_wdt->wdt_dev);
197 if (unlikely(err))
198 return err;
199
200 mtk_wdt->restart_handler.notifier_call = mtk_reset_handler;
201 mtk_wdt->restart_handler.priority = 128;
202 err = register_restart_handler(&mtk_wdt->restart_handler);
203 if (err)
204 dev_warn(&pdev->dev,
205 "cannot register restart handler (err=%d)\n", err);
206
207 dev_info(&pdev->dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)\n",
208 mtk_wdt->wdt_dev.timeout, nowayout);
209
210 return 0;
211}
212
213static int mtk_wdt_remove(struct platform_device *pdev)
214{
215 struct mtk_wdt_dev *mtk_wdt = platform_get_drvdata(pdev);
216
217 unregister_restart_handler(&mtk_wdt->restart_handler);
218
219 watchdog_unregister_device(&mtk_wdt->wdt_dev);
220
221 return 0;
222}
223
224static const struct of_device_id mtk_wdt_dt_ids[] = {
225 { .compatible = "mediatek,mt6589-wdt" },
226 { /* sentinel */ }
227};
228MODULE_DEVICE_TABLE(of, mtk_wdt_dt_ids);
229
230static struct platform_driver mtk_wdt_driver = {
231 .probe = mtk_wdt_probe,
232 .remove = mtk_wdt_remove,
233 .driver = {
234 .name = DRV_NAME,
235 .of_match_table = mtk_wdt_dt_ids,
236 },
237};
238
239module_platform_driver(mtk_wdt_driver);
240
241module_param(timeout, uint, 0);
242MODULE_PARM_DESC(timeout, "Watchdog heartbeat in seconds");
243
244module_param(nowayout, bool, 0);
245MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
246 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
247
248MODULE_LICENSE("GPL");
249MODULE_AUTHOR("Matthias Brugger <matthias.bgg@gmail.com>");
250MODULE_DESCRIPTION("Mediatek WatchDog Timer Driver");
251MODULE_VERSION(DRV_VERSION);
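
The timeout << 6 in mtk_wdt_set_timeout() above falls straight out of the clock comment once expanded: 32768 ticks per second divided by 512 ticks per length-field unit is 64 units per second. A one-line userspace check:

#include <stdio.h>

int main(void)
{
	unsigned int clk_hz = 32768, ticks_per_unit = 512, timeout = 31;

	/* 32768 / 512 = 64 units per second, i.e. timeout << 6 */
	printf("%u == %u\n", timeout * (clk_hz / ticks_per_unit), timeout << 6);
	return 0;
}
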
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 9f2709db61ca..1e6be9e40577 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -189,7 +189,7 @@ static int omap_wdt_set_timeout(struct watchdog_device *wdog,
189} 189}
190 190
191static const struct watchdog_info omap_wdt_info = { 191static const struct watchdog_info omap_wdt_info = {
192 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, 192 .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
193 .identity = "OMAP Watchdog", 193 .identity = "OMAP Watchdog",
194}; 194};
195 195
diff --git a/drivers/watchdog/retu_wdt.c b/drivers/watchdog/retu_wdt.c
index a7a0695971e4..b7c68e275aeb 100644
--- a/drivers/watchdog/retu_wdt.c
+++ b/drivers/watchdog/retu_wdt.c
@@ -94,7 +94,7 @@ static int retu_wdt_set_timeout(struct watchdog_device *wdog,
94} 94}
95 95
96static const struct watchdog_info retu_wdt_info = { 96static const struct watchdog_info retu_wdt_info = {
97 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, 97 .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
98 .identity = "Retu watchdog", 98 .identity = "Retu watchdog",
99}; 99};
100 100
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index 11aad5b7aafe..a6f7e2e29beb 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -45,6 +45,7 @@
45static struct clk *rt288x_wdt_clk; 45static struct clk *rt288x_wdt_clk;
46static unsigned long rt288x_wdt_freq; 46static unsigned long rt288x_wdt_freq;
47static void __iomem *rt288x_wdt_base; 47static void __iomem *rt288x_wdt_base;
48static struct reset_control *rt288x_wdt_reset;
48 49
49static bool nowayout = WATCHDOG_NOWAYOUT; 50static bool nowayout = WATCHDOG_NOWAYOUT;
50module_param(nowayout, bool, 0); 51module_param(nowayout, bool, 0);
@@ -151,16 +152,18 @@ static int rt288x_wdt_probe(struct platform_device *pdev)
151 if (IS_ERR(rt288x_wdt_clk)) 152 if (IS_ERR(rt288x_wdt_clk))
152 return PTR_ERR(rt288x_wdt_clk); 153 return PTR_ERR(rt288x_wdt_clk);
153 154
154 device_reset(&pdev->dev); 155 rt288x_wdt_reset = devm_reset_control_get(&pdev->dev, NULL);
156 if (!IS_ERR(rt288x_wdt_reset))
157 reset_control_deassert(rt288x_wdt_reset);
155 158
156 rt288x_wdt_freq = clk_get_rate(rt288x_wdt_clk) / RALINK_WDT_PRESCALE; 159 rt288x_wdt_freq = clk_get_rate(rt288x_wdt_clk) / RALINK_WDT_PRESCALE;
157 160
158 rt288x_wdt_dev.dev = &pdev->dev; 161 rt288x_wdt_dev.dev = &pdev->dev;
159 rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause(); 162 rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause();
160
161 rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq); 163 rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq);
162 rt288x_wdt_dev.timeout = rt288x_wdt_dev.max_timeout;
163 164
165 watchdog_init_timeout(&rt288x_wdt_dev, rt288x_wdt_dev.max_timeout,
166 &pdev->dev);
164 watchdog_set_nowayout(&rt288x_wdt_dev, nowayout); 167 watchdog_set_nowayout(&rt288x_wdt_dev, nowayout);
165 168
166 ret = watchdog_register_device(&rt288x_wdt_dev); 169 ret = watchdog_register_device(&rt288x_wdt_dev);
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index 12c15903d098..2c1db6fa9a27 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -57,7 +57,7 @@ static int twl4030_wdt_set_timeout(struct watchdog_device *wdt,
57} 57}
58 58
59static const struct watchdog_info twl4030_wdt_info = { 59static const struct watchdog_info twl4030_wdt_info = {
60 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, 60 .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
61 .identity = "TWL4030 Watchdog", 61 .identity = "TWL4030 Watchdog",
62}; 62};
63 63
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index 7165704a3e33..5824e25eebbb 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -50,7 +50,7 @@ static int cr_wdt_control; /* WDT control register */
50 50
51enum chips { w83627hf, w83627s, w83697hf, w83697ug, w83637hf, w83627thf, 51enum chips { w83627hf, w83627s, w83697hf, w83697ug, w83637hf, w83627thf,
52 w83687thf, w83627ehf, w83627dhg, w83627uhg, w83667hg, w83627dhg_p, 52 w83687thf, w83627ehf, w83627dhg, w83627uhg, w83667hg, w83627dhg_p,
53 w83667hg_b, nct6775, nct6776, nct6779 }; 53 w83667hg_b, nct6775, nct6776, nct6779, nct6791, nct6792 };
54 54
55static int timeout; /* in seconds */ 55static int timeout; /* in seconds */
56module_param(timeout, int, 0); 56module_param(timeout, int, 0);
@@ -95,6 +95,8 @@ MODULE_PARM_DESC(early_disable, "Disable watchdog at boot time (default=0)");
95#define NCT6775_ID 0xb4 95#define NCT6775_ID 0xb4
96#define NCT6776_ID 0xc3 96#define NCT6776_ID 0xc3
97#define NCT6779_ID 0xc5 97#define NCT6779_ID 0xc5
98#define NCT6791_ID 0xc8
99#define NCT6792_ID 0xc9
98 100
99#define W83627HF_WDT_TIMEOUT 0xf6 101#define W83627HF_WDT_TIMEOUT 0xf6
100#define W83697HF_WDT_TIMEOUT 0xf4 102#define W83697HF_WDT_TIMEOUT 0xf4
@@ -195,6 +197,8 @@ static int w83627hf_init(struct watchdog_device *wdog, enum chips chip)
195 case nct6775: 197 case nct6775:
196 case nct6776: 198 case nct6776:
197 case nct6779: 199 case nct6779:
200 case nct6791:
201 case nct6792:
198 /* 202 /*
199 * These chips have a fixed WDTO# output pin (W83627UHG), 203 * These chips have a fixed WDTO# output pin (W83627UHG),
200 * or support more than one WDTO# output pin. 204 * or support more than one WDTO# output pin.
@@ -395,6 +399,12 @@ static int wdt_find(int addr)
395 case NCT6779_ID: 399 case NCT6779_ID:
396 ret = nct6779; 400 ret = nct6779;
397 break; 401 break;
402 case NCT6791_ID:
403 ret = nct6791;
404 break;
405 case NCT6792_ID:
406 ret = nct6792;
407 break;
398 case 0xff: 408 case 0xff:
399 ret = -ENODEV; 409 ret = -ENODEV;
400 break; 410 break;
@@ -428,6 +438,8 @@ static int __init wdt_init(void)
428 "NCT6775", 438 "NCT6775",
429 "NCT6776", 439 "NCT6776",
430 "NCT6779", 440 "NCT6779",
441 "NCT6791",
442 "NCT6792",
431 }; 443 };
432 444
433 wdt_io = 0x2e; 445 wdt_io = 0x2e;
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 2140398a2a8c..2ccd3592d41f 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -2,7 +2,7 @@ ifeq ($(filter y, $(CONFIG_ARM) $(CONFIG_ARM64)),)
2obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o 2obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
3endif 3endif
4obj-$(CONFIG_X86) += fallback.o 4obj-$(CONFIG_X86) += fallback.o
5obj-y += grant-table.o features.o balloon.o manage.o 5obj-y += grant-table.o features.o balloon.o manage.o preempt.o
6obj-y += events/ 6obj-y += events/
7obj-y += xenbus/ 7obj-y += xenbus/
8 8
diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
new file mode 100644
index 000000000000..a1800c150839
--- /dev/null
+++ b/drivers/xen/preempt.c
@@ -0,0 +1,44 @@
1/*
2 * Preemptible hypercalls
3 *
4 * Copyright (C) 2014 Citrix Systems R&D ltd.
5 *
6 * This source code is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation; either version 2 of the
9 * License, or (at your option) any later version.
10 */
11
12#include <linux/sched.h>
13#include <xen/xen-ops.h>
14
15#ifndef CONFIG_PREEMPT
16
17/*
18 * Some hypercalls issued by the toolstack can take many 10s of
19 * seconds. Allow tasks running hypercalls via the privcmd driver to
20 * be voluntarily preempted even if full kernel preemption is
21 * disabled.
22 *
23 * Such preemptible hypercalls are bracketed by
24 * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
25 * calls.
26 */
27
28DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
29EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
30
31asmlinkage __visible void xen_maybe_preempt_hcall(void)
32{
33 if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
34 && should_resched())) {
35 /*
36 * Clear flag as we may be rescheduled on a different
37 * cpu.
38 */
39 __this_cpu_write(xen_in_preemptible_hcall, false);
40 _cond_resched();
41 __this_cpu_write(xen_in_preemptible_hcall, true);
42 }
43}
44#endif /* CONFIG_PREEMPT */
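
The xen_preemptible_hcall_begin()/end() brackets named in the comment are not part of this file; judging from the per-CPU flag exported above, they reduce to flipping that flag around the hypercall (a sketch of the implied xen-ops.h helpers, not the verbatim header):

/* Sketch: bracket a long-running hypercall as voluntarily preemptible. */
static inline void xen_preemptible_hcall_begin(void)
{
	__this_cpu_write(xen_in_preemptible_hcall, true);
}

static inline void xen_preemptible_hcall_end(void)
{
	__this_cpu_write(xen_in_preemptible_hcall, false);
}
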
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 569a13b9e856..59ac71c4a043 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -56,10 +56,12 @@ static long privcmd_ioctl_hypercall(void __user *udata)
56 if (copy_from_user(&hypercall, udata, sizeof(hypercall))) 56 if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
57 return -EFAULT; 57 return -EFAULT;
58 58
59 xen_preemptible_hcall_begin();
59 ret = privcmd_call(hypercall.op, 60 ret = privcmd_call(hypercall.op,
60 hypercall.arg[0], hypercall.arg[1], 61 hypercall.arg[0], hypercall.arg[1],
61 hypercall.arg[2], hypercall.arg[3], 62 hypercall.arg[2], hypercall.arg[3],
62 hypercall.arg[4]); 63 hypercall.arg[4]);
64 xen_preemptible_hcall_end();
63 65
64 return ret; 66 return ret;
65} 67}
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 61653a03a8f5..9faca6a60bb0 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -709,12 +709,11 @@ static int prepare_pending_reqs(struct vscsibk_info *info,
709static int scsiback_do_cmd_fn(struct vscsibk_info *info) 709static int scsiback_do_cmd_fn(struct vscsibk_info *info)
710{ 710{
711 struct vscsiif_back_ring *ring = &info->ring; 711 struct vscsiif_back_ring *ring = &info->ring;
712 struct vscsiif_request *ring_req; 712 struct vscsiif_request ring_req;
713 struct vscsibk_pend *pending_req; 713 struct vscsibk_pend *pending_req;
714 RING_IDX rc, rp; 714 RING_IDX rc, rp;
715 int err, more_to_do; 715 int err, more_to_do;
716 uint32_t result; 716 uint32_t result;
717 uint8_t act;
718 717
719 rc = ring->req_cons; 718 rc = ring->req_cons;
720 rp = ring->sring->req_prod; 719 rp = ring->sring->req_prod;
@@ -735,11 +734,10 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
735 if (!pending_req) 734 if (!pending_req)
736 return 1; 735 return 1;
737 736
738 ring_req = RING_GET_REQUEST(ring, rc); 737 ring_req = *RING_GET_REQUEST(ring, rc);
739 ring->req_cons = ++rc; 738 ring->req_cons = ++rc;
740 739
741 act = ring_req->act; 740 err = prepare_pending_reqs(info, &ring_req, pending_req);
742 err = prepare_pending_reqs(info, ring_req, pending_req);
743 if (err) { 741 if (err) {
744 switch (err) { 742 switch (err) {
745 case -ENODEV: 743 case -ENODEV:
@@ -755,9 +753,9 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
755 return 1; 753 return 1;
756 } 754 }
757 755
758 switch (act) { 756 switch (ring_req.act) {
759 case VSCSIIF_ACT_SCSI_CDB: 757 case VSCSIIF_ACT_SCSI_CDB:
760 if (scsiback_gnttab_data_map(ring_req, pending_req)) { 758 if (scsiback_gnttab_data_map(&ring_req, pending_req)) {
761 scsiback_fast_flush_area(pending_req); 759 scsiback_fast_flush_area(pending_req);
762 scsiback_do_resp_with_sense(NULL, 760 scsiback_do_resp_with_sense(NULL,
763 DRIVER_ERROR << 24, 0, pending_req); 761 DRIVER_ERROR << 24, 0, pending_req);
@@ -768,7 +766,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
768 break; 766 break;
769 case VSCSIIF_ACT_SCSI_ABORT: 767 case VSCSIIF_ACT_SCSI_ABORT:
770 scsiback_device_action(pending_req, TMR_ABORT_TASK, 768 scsiback_device_action(pending_req, TMR_ABORT_TASK,
771 ring_req->ref_rqid); 769 ring_req.ref_rqid);
772 break; 770 break;
773 case VSCSIIF_ACT_SCSI_RESET: 771 case VSCSIIF_ACT_SCSI_RESET:
774 scsiback_device_action(pending_req, TMR_LUN_RESET, 0); 772 scsiback_device_action(pending_req, TMR_LUN_RESET, 0);
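
Replacing the ring_req pointer with a by-value copy is the standard hardening for shared-ring backends: the frontend can rewrite a ring slot at any moment, so every field must be fetched exactly once, then validated and used only from local storage. The pattern, reduced to its essentials (types abridged and hypothetical):

struct req { unsigned int act; unsigned int ref_rqid; };	/* abridged */

static void handle_slot(const struct req *shared_slot)
{
	struct req local = *shared_slot;	/* snapshot the slot once */

	/*
	 * Only 'local' is consulted from here on; a concurrent writer on
	 * the other end of the ring can no longer change what was checked.
	 */
	switch (local.act) {
	/* ... dispatch on local.act, use local.ref_rqid ... */
	default:
		break;
	}
}
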
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 9ee5343d4884..3662f1d1d9cf 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -1127,7 +1127,7 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
1127 } 1127 }
1128 1128
1129 /* Write all dirty data */ 1129 /* Write all dirty data */
1130 if (S_ISREG(dentry->d_inode->i_mode)) 1130 if (d_is_reg(dentry))
1131 filemap_write_and_wait(dentry->d_inode->i_mapping); 1131 filemap_write_and_wait(dentry->d_inode->i_mapping);
1132 1132
1133 retval = p9_client_wstat(fid, &wstat); 1133 retval = p9_client_wstat(fid, &wstat);
diff --git a/fs/aio.c b/fs/aio.c
index 118a2e0088d8..f8e52a1854c1 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1285,7 +1285,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
1285 1285
1286 ret = -EINVAL; 1286 ret = -EINVAL;
1287 if (unlikely(ctx || nr_events == 0)) { 1287 if (unlikely(ctx || nr_events == 0)) {
1288 pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n", 1288 pr_debug("EINVAL: ctx %lu nr_events %u\n",
1289 ctx, nr_events); 1289 ctx, nr_events);
1290 goto out; 1290 goto out;
1291 } 1291 }
@@ -1333,7 +1333,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
1333 1333
1334 return ret; 1334 return ret;
1335 } 1335 }
1336 pr_debug("EINVAL: io_destroy: invalid context id\n"); 1336 pr_debug("EINVAL: invalid context id\n");
1337 return -EINVAL; 1337 return -EINVAL;
1338} 1338}
1339 1339
@@ -1515,7 +1515,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1515 (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || 1515 (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
1516 ((ssize_t)iocb->aio_nbytes < 0) 1516 ((ssize_t)iocb->aio_nbytes < 0)
1517 )) { 1517 )) {
1518 pr_debug("EINVAL: io_submit: overflow check\n"); 1518 pr_debug("EINVAL: overflow check\n");
1519 return -EINVAL; 1519 return -EINVAL;
1520 } 1520 }
1521 1521
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index aaf96cb25452..ac7d921ed984 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -95,7 +95,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param)
95 */ 95 */
96static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in) 96static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in)
97{ 97{
98 struct autofs_dev_ioctl tmp; 98 struct autofs_dev_ioctl tmp, *res;
99 99
100 if (copy_from_user(&tmp, in, sizeof(tmp))) 100 if (copy_from_user(&tmp, in, sizeof(tmp)))
101 return ERR_PTR(-EFAULT); 101 return ERR_PTR(-EFAULT);
@@ -106,7 +106,11 @@ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *i
106 if (tmp.size > (PATH_MAX + sizeof(tmp))) 106 if (tmp.size > (PATH_MAX + sizeof(tmp)))
107 return ERR_PTR(-ENAMETOOLONG); 107 return ERR_PTR(-ENAMETOOLONG);
108 108
109 return memdup_user(in, tmp.size); 109 res = memdup_user(in, tmp.size);
110 if (!IS_ERR(res))
111 res->size = tmp.size;
112
113 return res;
110} 114}
111 115
112static inline void free_dev_ioctl(struct autofs_dev_ioctl *param) 116static inline void free_dev_ioctl(struct autofs_dev_ioctl *param)
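
The one extra assignment in copy_dev_ioctl() matters more than it looks: memdup_user() copies the header from userspace a second time, so a hostile caller could change size between the validated first fetch and the second. Writing the already-checked tmp.size back into the duplicate closes that window. In outline (struct fields abridged):

#include <linux/err.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct hdr { u32 size; /* ... */ };	/* abridged */

static struct hdr *copy_hdr(struct hdr __user *in)
{
	struct hdr tmp, *res;

	if (copy_from_user(&tmp, in, sizeof(tmp)))
		return ERR_PTR(-EFAULT);
	/* ... bounds-check tmp.size here ... */

	res = memdup_user(in, tmp.size);	/* second fetch of *in */
	if (!IS_ERR(res))
		res->size = tmp.size;		/* keep only the checked value */
	return res;
}
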
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index bfdbaba9c2ba..11dd118f75e2 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -374,7 +374,7 @@ static struct dentry *should_expire(struct dentry *dentry,
374 return NULL; 374 return NULL;
375 } 375 }
376 376
377 if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) { 377 if (dentry->d_inode && d_is_symlink(dentry)) {
378 DPRINTK("checking symlink %p %pd", dentry, dentry); 378 DPRINTK("checking symlink %p %pd", dentry, dentry);
379 /* 379 /*
380 * A symlink can't be "busy" in the usual sense so 380 * A symlink can't be "busy" in the usual sense so
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index dbb5b7212ce1..7e44fdd03e2d 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -108,7 +108,7 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
108 struct dentry *dentry = file->f_path.dentry; 108 struct dentry *dentry = file->f_path.dentry;
109 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); 109 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
110 110
111 DPRINTK("file=%p dentry=%p %pD", file, dentry, dentry); 111 DPRINTK("file=%p dentry=%p %pd", file, dentry, dentry);
112 112
113 if (autofs4_oz_mode(sbi)) 113 if (autofs4_oz_mode(sbi))
114 goto out; 114 goto out;
@@ -371,7 +371,7 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
371 * having d_mountpoint() true, so there's no need to call back 371 * having d_mountpoint() true, so there's no need to call back
372 * to the daemon. 372 * to the daemon.
373 */ 373 */
374 if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) { 374 if (dentry->d_inode && d_is_symlink(dentry)) {
375 spin_unlock(&sbi->fs_lock); 375 spin_unlock(&sbi->fs_lock);
376 goto done; 376 goto done;
377 } 377 }
@@ -485,7 +485,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
485 * an incorrect ELOOP error return. 485 * an incorrect ELOOP error return.
486 */ 486 */
487 if ((!d_mountpoint(dentry) && !simple_empty(dentry)) || 487 if ((!d_mountpoint(dentry) && !simple_empty(dentry)) ||
488 (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode))) 488 (dentry->d_inode && d_is_symlink(dentry)))
489 status = -EISDIR; 489 status = -EISDIR;
490 } 490 }
491 spin_unlock(&sbi->fs_lock); 491 spin_unlock(&sbi->fs_lock);
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index afd2b4408adf..861b1e1c4777 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -15,161 +15,14 @@
 #include <linux/namei.h>
 #include <linux/poll.h>
 
-
-static loff_t bad_file_llseek(struct file *file, loff_t offset, int whence)
-{
-	return -EIO;
-}
-
-static ssize_t bad_file_read(struct file *filp, char __user *buf,
-			size_t size, loff_t *ppos)
-{
-	return -EIO;
-}
-
-static ssize_t bad_file_write(struct file *filp, const char __user *buf,
-			size_t siz, loff_t *ppos)
-{
-	return -EIO;
-}
-
-static ssize_t bad_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
-			unsigned long nr_segs, loff_t pos)
-{
-	return -EIO;
-}
-
-static ssize_t bad_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-			unsigned long nr_segs, loff_t pos)
-{
-	return -EIO;
-}
-
-static int bad_file_readdir(struct file *file, struct dir_context *ctx)
-{
-	return -EIO;
-}
-
-static unsigned int bad_file_poll(struct file *filp, poll_table *wait)
-{
-	return POLLERR;
-}
-
-static long bad_file_unlocked_ioctl(struct file *file, unsigned cmd,
-			unsigned long arg)
-{
-	return -EIO;
-}
-
-static long bad_file_compat_ioctl(struct file *file, unsigned int cmd,
-			unsigned long arg)
-{
-	return -EIO;
-}
-
-static int bad_file_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	return -EIO;
-}
-
 static int bad_file_open(struct inode *inode, struct file *filp)
 {
 	return -EIO;
 }
 
-static int bad_file_flush(struct file *file, fl_owner_t id)
-{
-	return -EIO;
-}
-
-static int bad_file_release(struct inode *inode, struct file *filp)
-{
-	return -EIO;
-}
-
-static int bad_file_fsync(struct file *file, loff_t start, loff_t end,
-			  int datasync)
-{
-	return -EIO;
-}
-
-static int bad_file_aio_fsync(struct kiocb *iocb, int datasync)
-{
-	return -EIO;
-}
-
-static int bad_file_fasync(int fd, struct file *filp, int on)
-{
-	return -EIO;
-}
-
-static int bad_file_lock(struct file *file, int cmd, struct file_lock *fl)
-{
-	return -EIO;
-}
-
-static ssize_t bad_file_sendpage(struct file *file, struct page *page,
-			int off, size_t len, loff_t *pos, int more)
-{
-	return -EIO;
-}
-
-static unsigned long bad_file_get_unmapped_area(struct file *file,
-				unsigned long addr, unsigned long len,
-				unsigned long pgoff, unsigned long flags)
-{
-	return -EIO;
-}
-
-static int bad_file_check_flags(int flags)
-{
-	return -EIO;
-}
-
-static int bad_file_flock(struct file *filp, int cmd, struct file_lock *fl)
-{
-	return -EIO;
-}
-
-static ssize_t bad_file_splice_write(struct pipe_inode_info *pipe,
-			struct file *out, loff_t *ppos, size_t len,
-			unsigned int flags)
-{
-	return -EIO;
-}
-
-static ssize_t bad_file_splice_read(struct file *in, loff_t *ppos,
-			struct pipe_inode_info *pipe, size_t len,
-			unsigned int flags)
-{
-	return -EIO;
-}
-
 static const struct file_operations bad_file_ops =
 {
-	.llseek		= bad_file_llseek,
-	.read		= bad_file_read,
-	.write		= bad_file_write,
-	.aio_read	= bad_file_aio_read,
-	.aio_write	= bad_file_aio_write,
-	.iterate	= bad_file_readdir,
-	.poll		= bad_file_poll,
-	.unlocked_ioctl	= bad_file_unlocked_ioctl,
-	.compat_ioctl	= bad_file_compat_ioctl,
-	.mmap		= bad_file_mmap,
 	.open		= bad_file_open,
-	.flush		= bad_file_flush,
-	.release	= bad_file_release,
-	.fsync		= bad_file_fsync,
-	.aio_fsync	= bad_file_aio_fsync,
-	.fasync		= bad_file_fasync,
-	.lock		= bad_file_lock,
-	.sendpage	= bad_file_sendpage,
-	.get_unmapped_area = bad_file_get_unmapped_area,
-	.check_flags	= bad_file_check_flags,
-	.flock		= bad_file_flock,
-	.splice_write	= bad_file_splice_write,
-	.splice_read	= bad_file_splice_read,
 };
 
 static int bad_inode_create (struct inode *dir, struct dentry *dentry,
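The bad_inode.c deletion works because every use of a file starts with ->open(): once bad_file_open() unconditionally returns -EIO, no other file operation can ever be invoked on such a file, so the twenty-odd stubs and their table entries are dead code. A small user-space sketch of that reasoning under the same assumption (all names are ours):

    #include <errno.h>
    #include <stdio.h>

    struct file_ops {
        int (*open)(void);
        int (*read)(void);  /* unreachable when open always fails */
    };

    static int bad_open(void)
    {
        return -EIO;
    }

    /* Only .open is populated; a caller that honors the open result
     * never dereferences the other slots. */
    static const struct file_ops bad_ops = { .open = bad_open };

    int main(void)
    {
        if (bad_ops.open() < 0)
            printf("open failed; no other method can run\n");
        return 0;
    }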
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 02b16910f4c9..995986b8e36b 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -645,11 +645,12 @@ out:
 
 static unsigned long randomize_stack_top(unsigned long stack_top)
 {
-	unsigned int random_variable = 0;
+	unsigned long random_variable = 0;
 
 	if ((current->flags & PF_RANDOMIZE) &&
 	    !(current->personality & ADDR_NO_RANDOMIZE)) {
-		random_variable = get_random_int() & STACK_RND_MASK;
+		random_variable = (unsigned long) get_random_int();
+		random_variable &= STACK_RND_MASK;
 		random_variable <<= PAGE_SHIFT;
 	}
 #ifdef CONFIG_STACK_GROWSUP
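The binfmt_elf fix widens random_variable because on 64-bit architectures STACK_RND_MASK << PAGE_SHIFT no longer fits in 32 bits, so an unsigned int silently discarded the top entropy bits. A worked example, assuming x86_64-style values of STACK_RND_MASK = 0x3fffff and PAGE_SHIFT = 12 (illustrative constants):

    #include <stdint.h>
    #include <stdio.h>

    #define STACK_RND_MASK 0x3fffffULL  /* 22 random bits (assumed) */
    #define PAGE_SHIFT 12

    int main(void)
    {
        uint32_t narrow = STACK_RND_MASK;   /* old: unsigned int */
        uint64_t wide = STACK_RND_MASK;     /* new: unsigned long */

        /* 22 bits shifted left by 12 needs 34 bits; the 32-bit
         * variable drops the top two bits, the 64-bit one keeps all. */
        printf("32-bit: %#x\n", (unsigned)(narrow << PAGE_SHIFT));
        printf("64-bit: %#llx\n",
               (unsigned long long)(wide << PAGE_SHIFT));
        return 0;
    }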
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 8729cf68d2fe..f55721ff9385 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1246,25 +1246,6 @@ int btrfs_check_shared(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
-/*
- * this makes the path point to (inum INODE_ITEM ioff)
- */
-int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
-			struct btrfs_path *path)
-{
-	struct btrfs_key key;
-	return btrfs_find_item(fs_root, path, inum, ioff,
-			BTRFS_INODE_ITEM_KEY, &key);
-}
-
-static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
-				struct btrfs_path *path,
-				struct btrfs_key *found_key)
-{
-	return btrfs_find_item(fs_root, path, inum, ioff,
-			BTRFS_INODE_REF_KEY, found_key);
-}
-
 int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
 			  u64 start_off, struct btrfs_path *path,
 			  struct btrfs_inode_extref **ret_extref,
@@ -1374,7 +1355,8 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
 			btrfs_tree_read_unlock_blocking(eb);
 			free_extent_buffer(eb);
 		}
-		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
+		ret = btrfs_find_item(fs_root, path, parent, 0,
+				BTRFS_INODE_REF_KEY, &found_key);
 		if (ret > 0)
 			ret = -ENOENT;
 		if (ret)
@@ -1727,8 +1709,10 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
 	struct btrfs_key found_key;
 
 	while (!ret) {
-		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
-				&found_key);
+		ret = btrfs_find_item(fs_root, path, inum,
+				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
+				&found_key);
+
 		if (ret < 0)
 			break;
 		if (ret) {
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index 2a1ac6bfc724..9c41fbac3009 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -32,9 +32,6 @@ struct inode_fs_paths {
 typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root,
 		void *ctx);
 
-int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
-			struct btrfs_path *path);
-
 int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
 			struct btrfs_path *path, struct btrfs_key *found_key,
 			u64 *flags);
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 4aadadcfab20..de5e4f2adfea 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -185,6 +185,9 @@ struct btrfs_inode {
 
 	struct btrfs_delayed_node *delayed_node;
 
+	/* File creation time. */
+	struct timespec i_otime;
+
 	struct inode vfs_inode;
 };
 
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 14a72ed14ef7..993642199326 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -213,11 +213,19 @@ static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
  */
 static void add_root_to_dirty_list(struct btrfs_root *root)
 {
+	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
+	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
+		return;
+
 	spin_lock(&root->fs_info->trans_lock);
-	if (test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state) &&
-	    list_empty(&root->dirty_list)) {
-		list_add(&root->dirty_list,
-			 &root->fs_info->dirty_cowonly_roots);
+	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
+		/* Want the extent tree to be the last on the list */
+		if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
+			list_move_tail(&root->dirty_list,
+				       &root->fs_info->dirty_cowonly_roots);
+		else
+			list_move(&root->dirty_list,
+				  &root->fs_info->dirty_cowonly_roots);
 	}
 	spin_unlock(&root->fs_info->trans_lock);
 }
@@ -1363,8 +1371,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 
 	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
 		BUG_ON(tm->slot != 0);
-		eb_rewin = alloc_dummy_extent_buffer(eb->start,
-						fs_info->tree_root->nodesize);
+		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
 		if (!eb_rewin) {
 			btrfs_tree_read_unlock_blocking(eb);
 			free_extent_buffer(eb);
@@ -1444,7 +1451,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 	} else if (old_root) {
 		btrfs_tree_read_unlock(eb_root);
 		free_extent_buffer(eb_root);
-		eb = alloc_dummy_extent_buffer(logical, root->nodesize);
+		eb = alloc_dummy_extent_buffer(root->fs_info, logical);
 	} else {
 		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
 		eb = btrfs_clone_extent_buffer(eb_root);
@@ -2282,7 +2289,7 @@ static void reada_for_search(struct btrfs_root *root,
 		if ((search <= target && target - search <= 65536) ||
 		    (search > target && search - target <= 65536)) {
 			gen = btrfs_node_ptr_generation(node, nr);
-			readahead_tree_block(root, search, blocksize);
+			readahead_tree_block(root, search);
 			nread += blocksize;
 		}
 		nscan++;
@@ -2301,7 +2308,6 @@ static noinline void reada_for_balance(struct btrfs_root *root,
 	u64 gen;
 	u64 block1 = 0;
 	u64 block2 = 0;
-	int blocksize;
 
 	parent = path->nodes[level + 1];
 	if (!parent)
@@ -2309,7 +2315,6 @@ static noinline void reada_for_balance(struct btrfs_root *root,
 
 	nritems = btrfs_header_nritems(parent);
 	slot = path->slots[level + 1];
-	blocksize = root->nodesize;
 
 	if (slot > 0) {
 		block1 = btrfs_node_blockptr(parent, slot - 1);
@@ -2334,9 +2339,9 @@ static noinline void reada_for_balance(struct btrfs_root *root,
 	}
 
 	if (block1)
-		readahead_tree_block(root, block1, blocksize);
+		readahead_tree_block(root, block1);
 	if (block2)
-		readahead_tree_block(root, block2, blocksize);
+		readahead_tree_block(root, block2);
 }
 
 
@@ -2609,32 +2614,24 @@ static int key_search(struct extent_buffer *b, struct btrfs_key *key,
 	return 0;
 }
 
-int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *found_path,
+int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
 		u64 iobjectid, u64 ioff, u8 key_type,
 		struct btrfs_key *found_key)
 {
 	int ret;
 	struct btrfs_key key;
 	struct extent_buffer *eb;
-	struct btrfs_path *path;
+
+	ASSERT(path);
+	ASSERT(found_key);
 
 	key.type = key_type;
 	key.objectid = iobjectid;
 	key.offset = ioff;
 
-	if (found_path == NULL) {
-		path = btrfs_alloc_path();
-		if (!path)
-			return -ENOMEM;
-	} else
-		path = found_path;
-
 	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
-	if ((ret < 0) || (found_key == NULL)) {
-		if (path != found_path)
-			btrfs_free_path(path);
+	if (ret < 0)
 		return ret;
-	}
 
 	eb = path->nodes[0];
 	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
@@ -3383,7 +3380,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 	add_root_to_dirty_list(root);
 	extent_buffer_get(c);
 	path->nodes[level] = c;
-	path->locks[level] = BTRFS_WRITE_LOCK;
+	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
 	path->slots[level] = 0;
 	return 0;
 }
@@ -4356,13 +4353,15 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
 	path->search_for_split = 1;
 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 	path->search_for_split = 0;
+	if (ret > 0)
+		ret = -EAGAIN;
 	if (ret < 0)
 		goto err;
 
 	ret = -EAGAIN;
 	leaf = path->nodes[0];
-	/* if our item isn't there or got smaller, return now */
-	if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
+	/* if our item isn't there, return now */
+	if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
 		goto err;
 
 	/* the leaf has changed, it now has room. return now */
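The add_root_to_dirty_list() rewrite above layers a lock-free early exit (a plain test_bit) over a test_and_set_bit() that guarantees the root is queued exactly once even under races. A user-space analogue of the once-only gate using C11 atomics (the list manipulation is elided; names are ours):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool dirty;

    static void mark_dirty(void)
    {
        if (atomic_load(&dirty))
            return;                 /* fast path: already queued */
        if (!atomic_exchange(&dirty, true))
            printf("queued exactly once\n");   /* list_move goes here */
    }

    int main(void)
    {
        mark_dirty();
        mark_dirty();   /* no-op: the flag was already set */
        return 0;
    }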
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 0b180708bf79..84c3b00f3de8 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -198,6 +198,8 @@ static int btrfs_csum_sizes[] = { 4, 0 };
 
 #define BTRFS_DIRTY_METADATA_THRESH	(32 * 1024 * 1024)
 
+#define BTRFS_MAX_EXTENT_SIZE (128 * 1024 * 1024)
+
 /*
  * The key defines the order in the tree, and so it also defines (optimal)
  * block layout.
@@ -1020,6 +1022,9 @@ enum btrfs_raid_types {
 	 BTRFS_BLOCK_GROUP_RAID6 |   \
 	 BTRFS_BLOCK_GROUP_DUP |     \
 	 BTRFS_BLOCK_GROUP_RAID10)
+#define BTRFS_BLOCK_GROUP_RAID56_MASK	(BTRFS_BLOCK_GROUP_RAID5 |   \
+					 BTRFS_BLOCK_GROUP_RAID6)
+
 /*
  * We need a bit for restriper to be able to tell when chunks of type
  * SINGLE are available. This "extended" profile format is used in
@@ -1239,7 +1244,6 @@ enum btrfs_disk_cache_state {
 	BTRFS_DC_ERROR		= 1,
 	BTRFS_DC_CLEAR		= 2,
 	BTRFS_DC_SETUP		= 3,
-	BTRFS_DC_NEED_WRITE	= 4,
 };
 
 struct btrfs_caching_control {
@@ -1277,7 +1281,6 @@ struct btrfs_block_group_cache {
 	unsigned long full_stripe_len;
 
 	unsigned int ro:1;
-	unsigned int dirty:1;
 	unsigned int iref:1;
 	unsigned int has_caching_ctl:1;
 	unsigned int removed:1;
@@ -1315,6 +1318,9 @@ struct btrfs_block_group_cache {
 	struct list_head ro_list;
 
 	atomic_t trimming;
+
+	/* For dirty block groups */
+	struct list_head dirty_list;
 };
 
 /* delayed seq elem */
@@ -1741,6 +1747,7 @@ struct btrfs_fs_info {
 
 	spinlock_t unused_bgs_lock;
 	struct list_head unused_bgs;
+	struct mutex unused_bg_unpin_mutex;
 
 	/* For btrfs to record security options */
 	struct security_mnt_opts security_opts;
@@ -1776,6 +1783,7 @@ struct btrfs_subvolume_writers {
 #define BTRFS_ROOT_DEFRAG_RUNNING	6
 #define BTRFS_ROOT_FORCE_COW		7
 #define BTRFS_ROOT_MULTI_LOG_TASKS	8
+#define BTRFS_ROOT_DIRTY		9
 
 /*
  * in ram representation of the tree. extent_root is used for all allocations
@@ -1794,8 +1802,6 @@ struct btrfs_root {
 	struct btrfs_fs_info *fs_info;
 	struct extent_io_tree dirty_log_pages;
 
-	struct kobject root_kobj;
-	struct completion kobj_unregister;
 	struct mutex objectid_mutex;
 
 	spinlock_t accounting_lock;
@@ -2465,31 +2471,6 @@ BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
 BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
 BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
 BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);
-
-static inline struct btrfs_timespec *
-btrfs_inode_atime(struct btrfs_inode_item *inode_item)
-{
-	unsigned long ptr = (unsigned long)inode_item;
-	ptr += offsetof(struct btrfs_inode_item, atime);
-	return (struct btrfs_timespec *)ptr;
-}
-
-static inline struct btrfs_timespec *
-btrfs_inode_mtime(struct btrfs_inode_item *inode_item)
-{
-	unsigned long ptr = (unsigned long)inode_item;
-	ptr += offsetof(struct btrfs_inode_item, mtime);
-	return (struct btrfs_timespec *)ptr;
-}
-
-static inline struct btrfs_timespec *
-btrfs_inode_ctime(struct btrfs_inode_item *inode_item)
-{
-	unsigned long ptr = (unsigned long)inode_item;
-	ptr += offsetof(struct btrfs_inode_item, ctime);
-	return (struct btrfs_timespec *)ptr;
-}
-
 BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64);
 BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32);
 BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
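Among the ctree.h changes, BTRFS_BLOCK_GROUP_RAID56_MASK folds the two parity-RAID profile bits into one mask so callers can ask "is this any RAID5/6 profile" with a single AND. A sketch of the grouped-mask idiom (the bit positions here are illustrative, not the on-disk values):

    #include <stdio.h>

    #define BG_RAID5 (1ULL << 7)    /* illustrative bit positions */
    #define BG_RAID6 (1ULL << 8)
    #define BG_RAID56_MASK (BG_RAID5 | BG_RAID6)

    int main(void)
    {
        unsigned long long flags = BG_RAID6;

        /* One test covers both members of the group. */
        if (flags & BG_RAID56_MASK)
            printf("parity RAID profile\n");
        return 0;
    }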
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index de4e70fb3cbb..82f0c7c95474 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1755,27 +1755,31 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
 	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
 	btrfs_set_stack_inode_block_group(inode_item, 0);
 
-	btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
+	btrfs_set_stack_timespec_sec(&inode_item->atime,
 				     inode->i_atime.tv_sec);
-	btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
+	btrfs_set_stack_timespec_nsec(&inode_item->atime,
 				      inode->i_atime.tv_nsec);
 
-	btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
+	btrfs_set_stack_timespec_sec(&inode_item->mtime,
 				     inode->i_mtime.tv_sec);
-	btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
+	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
 				      inode->i_mtime.tv_nsec);
 
-	btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
+	btrfs_set_stack_timespec_sec(&inode_item->ctime,
 				     inode->i_ctime.tv_sec);
-	btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
+	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
 				      inode->i_ctime.tv_nsec);
+
+	btrfs_set_stack_timespec_sec(&inode_item->otime,
+				     BTRFS_I(inode)->i_otime.tv_sec);
+	btrfs_set_stack_timespec_nsec(&inode_item->otime,
+				      BTRFS_I(inode)->i_otime.tv_nsec);
 }
 
 int btrfs_fill_inode(struct inode *inode, u32 *rdev)
 {
 	struct btrfs_delayed_node *delayed_node;
 	struct btrfs_inode_item *inode_item;
-	struct btrfs_timespec *tspec;
 
 	delayed_node = btrfs_get_delayed_node(inode);
 	if (!delayed_node)
@@ -1802,17 +1806,19 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
 	*rdev = btrfs_stack_inode_rdev(inode_item);
 	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
 
-	tspec = btrfs_inode_atime(inode_item);
-	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
-	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
+	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
+
+	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
+	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
 
-	tspec = btrfs_inode_mtime(inode_item);
-	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
-	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
+	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
 
-	tspec = btrfs_inode_ctime(inode_item);
-	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
-	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+	BTRFS_I(inode)->i_otime.tv_sec =
+		btrfs_stack_timespec_sec(&inode_item->otime);
+	BTRFS_I(inode)->i_otime.tv_nsec =
+		btrfs_stack_timespec_nsec(&inode_item->otime);
 
 	inode->i_generation = BTRFS_I(inode)->generation;
 	BTRFS_I(inode)->index_cnt = (u64)-1;
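The delayed-inode conversion drops the btrfs_inode_atime()-style helpers, which computed a field's address with offsetof() and a cast, in favor of plain &inode_item->atime. Both forms produce the same pointer; a sketch demonstrating the equivalence on a stand-in struct (ours, not the on-disk btrfs_inode_item):

    #include <stddef.h>
    #include <stdio.h>

    struct ts { long long sec; int nsec; };
    struct item { unsigned long long generation; struct ts atime; };

    int main(void)
    {
        struct item it;
        /* The removed helpers did this arithmetic by hand... */
        struct ts *via_offsetof = (struct ts *)
            ((char *)&it + offsetof(struct item, atime));

        /* ...while the new code lets the compiler do it. */
        printf("same address: %d\n", via_offsetof == &it.atime);
        return 0;
    }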
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index ca6a3a3b6b6c..5ec03d999c37 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -440,18 +440,9 @@ leave:
  */
 static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info)
 {
-	s64 writers;
-	DEFINE_WAIT(wait);
-
 	set_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state);
-	do {
-		prepare_to_wait(&fs_info->replace_wait, &wait,
-				TASK_UNINTERRUPTIBLE);
-		writers = percpu_counter_sum(&fs_info->bio_counter);
-		if (writers)
-			schedule();
-		finish_wait(&fs_info->replace_wait, &wait);
-	} while (writers);
+	wait_event(fs_info->replace_wait, !percpu_counter_sum(
+		   &fs_info->bio_counter));
 }
 
 /*
@@ -932,15 +923,15 @@ void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount)
 
 void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info)
 {
-	DEFINE_WAIT(wait);
-again:
-	percpu_counter_inc(&fs_info->bio_counter);
-	if (test_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state)) {
+	while (1) {
+		percpu_counter_inc(&fs_info->bio_counter);
+		if (likely(!test_bit(BTRFS_FS_STATE_DEV_REPLACING,
+				     &fs_info->fs_state)))
+			break;
+
 		btrfs_bio_counter_dec(fs_info);
 		wait_event(fs_info->replace_wait,
 			   !test_bit(BTRFS_FS_STATE_DEV_REPLACING,
 				     &fs_info->fs_state));
-		goto again;
 	}
-
 }
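Both dev-replace hunks replace open-coded prepare_to_wait()/schedule()/finish_wait() loops with wait_event(), a single "sleep until this condition holds" primitive that handles the race between testing the condition and going to sleep. The closest portable analogue is pthreads' condition-variable loop (a sketch, not the kernel API):

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static long in_flight;          /* stands in for bio_counter */

    /* Equivalent of wait_event(wq, in_flight == 0): re-test the
     * condition after every wakeup, inside the lock. */
    static void wait_for_drain(void)
    {
        pthread_mutex_lock(&lock);
        while (in_flight != 0)
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
    }

    /* Writers decrement in_flight under the lock and signal cond. */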
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 1afb18226da8..f79f38542a73 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -318,7 +318,7 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
 		memcpy(&found, result, csum_size);
 
 		read_extent_buffer(buf, &val, 0, csum_size);
-		printk_ratelimited(KERN_INFO
+		printk_ratelimited(KERN_WARNING
 			"BTRFS: %s checksum verify failed on %llu wanted %X found %X "
 			"level %d\n",
 			root->fs_info->sb->s_id, buf->start,
@@ -367,7 +367,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 		ret = 0;
 		goto out;
 	}
-	printk_ratelimited(KERN_INFO "BTRFS (device %s): parent transid verify failed on %llu wanted %llu found %llu\n",
+	printk_ratelimited(KERN_ERR
+	    "BTRFS (device %s): parent transid verify failed on %llu wanted %llu found %llu\n",
 			eb->fs_info->sb->s_id, eb->start,
 			parent_transid, btrfs_header_generation(eb));
 	ret = 1;
@@ -633,21 +634,21 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
 
 	found_start = btrfs_header_bytenr(eb);
 	if (found_start != eb->start) {
-		printk_ratelimited(KERN_INFO "BTRFS (device %s): bad tree block start "
+		printk_ratelimited(KERN_ERR "BTRFS (device %s): bad tree block start "
 			       "%llu %llu\n",
 			       eb->fs_info->sb->s_id, found_start, eb->start);
 		ret = -EIO;
 		goto err;
 	}
 	if (check_tree_block_fsid(root, eb)) {
-		printk_ratelimited(KERN_INFO "BTRFS (device %s): bad fsid on block %llu\n",
+		printk_ratelimited(KERN_ERR "BTRFS (device %s): bad fsid on block %llu\n",
 			       eb->fs_info->sb->s_id, eb->start);
 		ret = -EIO;
 		goto err;
 	}
 	found_level = btrfs_header_level(eb);
 	if (found_level >= BTRFS_MAX_LEVEL) {
-		btrfs_info(root->fs_info, "bad tree block level %d",
+		btrfs_err(root->fs_info, "bad tree block level %d",
 			   (int)btrfs_header_level(eb));
 		ret = -EIO;
 		goto err;
@@ -1073,12 +1074,12 @@ static const struct address_space_operations btree_aops = {
 	.set_page_dirty = btree_set_page_dirty,
 };
 
-void readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize)
+void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
 {
 	struct extent_buffer *buf = NULL;
 	struct inode *btree_inode = root->fs_info->btree_inode;
 
-	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+	buf = btrfs_find_create_tree_block(root, bytenr);
 	if (!buf)
 		return;
 	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
@@ -1086,7 +1087,7 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize)
 	free_extent_buffer(buf);
 }
 
-int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
+int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
 			 int mirror_num, struct extent_buffer **eb)
 {
 	struct extent_buffer *buf = NULL;
@@ -1094,7 +1095,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
 	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
 	int ret;
 
-	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+	buf = btrfs_find_create_tree_block(root, bytenr);
 	if (!buf)
 		return 0;
 
@@ -1125,12 +1126,11 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
 }
 
 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
-						 u64 bytenr, u32 blocksize)
+						 u64 bytenr)
 {
 	if (btrfs_test_is_dummy_root(root))
-		return alloc_test_extent_buffer(root->fs_info, bytenr,
-						blocksize);
-	return alloc_extent_buffer(root->fs_info, bytenr, blocksize);
+		return alloc_test_extent_buffer(root->fs_info, bytenr);
+	return alloc_extent_buffer(root->fs_info, bytenr);
 }
 
 
@@ -1152,7 +1152,7 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
 	struct extent_buffer *buf = NULL;
 	int ret;
 
-	buf = btrfs_find_create_tree_block(root, bytenr, root->nodesize);
+	buf = btrfs_find_create_tree_block(root, bytenr);
 	if (!buf)
 		return NULL;
 
@@ -1275,12 +1275,10 @@ static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
 	memset(&root->root_key, 0, sizeof(root->root_key));
 	memset(&root->root_item, 0, sizeof(root->root_item));
 	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
-	memset(&root->root_kobj, 0, sizeof(root->root_kobj));
 	if (fs_info)
 		root->defrag_trans_start = fs_info->generation;
 	else
 		root->defrag_trans_start = 0;
-	init_completion(&root->kobj_unregister);
 	root->root_key.objectid = objectid;
 	root->anon_dev = 0;
 
@@ -1630,6 +1628,8 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
 				     bool check_ref)
 {
 	struct btrfs_root *root;
+	struct btrfs_path *path;
+	struct btrfs_key key;
 	int ret;
 
 	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
@@ -1669,8 +1669,17 @@ again:
 	if (ret)
 		goto fail;
 
-	ret = btrfs_find_item(fs_info->tree_root, NULL, BTRFS_ORPHAN_OBJECTID,
-			location->objectid, BTRFS_ORPHAN_ITEM_KEY, NULL);
+	path = btrfs_alloc_path();
+	if (!path) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	key.objectid = BTRFS_ORPHAN_OBJECTID;
+	key.type = BTRFS_ORPHAN_ITEM_KEY;
+	key.offset = location->objectid;
+
+	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
+	btrfs_free_path(path);
 	if (ret < 0)
 		goto fail;
 	if (ret == 0)
@@ -2232,6 +2241,7 @@ int open_ctree(struct super_block *sb,
 	spin_lock_init(&fs_info->qgroup_op_lock);
 	spin_lock_init(&fs_info->buffer_lock);
 	spin_lock_init(&fs_info->unused_bgs_lock);
+	mutex_init(&fs_info->unused_bg_unpin_mutex);
 	rwlock_init(&fs_info->tree_mod_log_lock);
 	mutex_init(&fs_info->reloc_mutex);
 	mutex_init(&fs_info->delalloc_root_mutex);
@@ -2496,7 +2506,7 @@ int open_ctree(struct super_block *sb,
 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
 
 	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
-		printk(KERN_ERR "BTRFS: has skinny extents\n");
+		printk(KERN_INFO "BTRFS: has skinny extents\n");
 
 	/*
 	 * flag our filesystem as having big metadata blocks if
@@ -2520,7 +2530,7 @@ int open_ctree(struct super_block *sb,
 	 */
 	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
 	    (sectorsize != nodesize)) {
-		printk(KERN_WARNING "BTRFS: unequal leaf/node/sector sizes "
+		printk(KERN_ERR "BTRFS: unequal leaf/node/sector sizes "
 				"are not allowed for mixed block groups on %s\n",
 				sb->s_id);
 		goto fail_alloc;
@@ -2628,12 +2638,12 @@ int open_ctree(struct super_block *sb,
 	sb->s_blocksize_bits = blksize_bits(sectorsize);
 
 	if (btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
-		printk(KERN_INFO "BTRFS: valid FS not found on %s\n", sb->s_id);
+		printk(KERN_ERR "BTRFS: valid FS not found on %s\n", sb->s_id);
 		goto fail_sb_buffer;
 	}
 
 	if (sectorsize != PAGE_SIZE) {
-		printk(KERN_WARNING "BTRFS: Incompatible sector size(%lu) "
+		printk(KERN_ERR "BTRFS: incompatible sector size (%lu) "
 		       "found on %s\n", (unsigned long)sectorsize, sb->s_id);
 		goto fail_sb_buffer;
 	}
@@ -2642,7 +2652,7 @@ int open_ctree(struct super_block *sb,
 	ret = btrfs_read_sys_array(tree_root);
 	mutex_unlock(&fs_info->chunk_mutex);
 	if (ret) {
-		printk(KERN_WARNING "BTRFS: failed to read the system "
+		printk(KERN_ERR "BTRFS: failed to read the system "
 		       "array on %s\n", sb->s_id);
 		goto fail_sb_buffer;
 	}
@@ -2657,7 +2667,7 @@ int open_ctree(struct super_block *sb,
 					   generation);
 	if (!chunk_root->node ||
 	    !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
-		printk(KERN_WARNING "BTRFS: failed to read chunk root on %s\n",
+		printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n",
 		       sb->s_id);
 		goto fail_tree_roots;
 	}
@@ -2669,7 +2679,7 @@ int open_ctree(struct super_block *sb,
 
 	ret = btrfs_read_chunk_tree(chunk_root);
 	if (ret) {
-		printk(KERN_WARNING "BTRFS: failed to read chunk tree on %s\n",
+		printk(KERN_ERR "BTRFS: failed to read chunk tree on %s\n",
 		       sb->s_id);
 		goto fail_tree_roots;
 	}
@@ -2681,7 +2691,7 @@ int open_ctree(struct super_block *sb,
 	btrfs_close_extra_devices(fs_info, fs_devices, 0);
 
 	if (!fs_devices->latest_bdev) {
-		printk(KERN_CRIT "BTRFS: failed to read devices on %s\n",
+		printk(KERN_ERR "BTRFS: failed to read devices on %s\n",
 		       sb->s_id);
 		goto fail_tree_roots;
 	}
@@ -2765,7 +2775,7 @@ retry_root_backup:
 
 	ret = btrfs_recover_balance(fs_info);
 	if (ret) {
-		printk(KERN_WARNING "BTRFS: failed to recover balance\n");
+		printk(KERN_ERR "BTRFS: failed to recover balance\n");
 		goto fail_block_groups;
 	}
 
@@ -3860,6 +3870,21 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
 		printk(KERN_WARNING "BTRFS: log_root block unaligned: %llu\n",
 				btrfs_super_log_root(sb));
 
+	/*
+	 * Check the lower bound, the alignment and other constraints are
+	 * checked later.
+	 */
+	if (btrfs_super_nodesize(sb) < 4096) {
+		printk(KERN_ERR "BTRFS: nodesize too small: %u < 4096\n",
+				btrfs_super_nodesize(sb));
+		ret = -EINVAL;
+	}
+	if (btrfs_super_sectorsize(sb) < 4096) {
+		printk(KERN_ERR "BTRFS: sectorsize too small: %u < 4096\n",
+				btrfs_super_sectorsize(sb));
+		ret = -EINVAL;
+	}
+
 	if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
 		printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n",
 				fs_info->fsid, sb->dev_item.fsid);
@@ -3873,6 +3898,10 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
 	if (btrfs_super_num_devices(sb) > (1UL << 31))
 		printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
 				btrfs_super_num_devices(sb));
+	if (btrfs_super_num_devices(sb) == 0) {
+		printk(KERN_ERR "BTRFS: number of devices is 0\n");
+		ret = -EINVAL;
+	}
 
 	if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
 		printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n",
@@ -3881,6 +3910,25 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
 	}
 
 	/*
+	 * Obvious sys_chunk_array corruptions, it must hold at least one key
+	 * and one chunk
+	 */
+	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
+		printk(KERN_ERR "BTRFS: system chunk array too big %u > %u\n",
+				btrfs_super_sys_array_size(sb),
+				BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
+		ret = -EINVAL;
+	}
+	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
+			+ sizeof(struct btrfs_chunk)) {
+		printk(KERN_ERR "BTRFS: system chunk array too small %u < %lu\n",
+				btrfs_super_sys_array_size(sb),
+				sizeof(struct btrfs_disk_key)
+				+ sizeof(struct btrfs_chunk));
+		ret = -EINVAL;
+	}
+
+	/*
 	 * The generation is a global counter, we'll trust it more than the others
 	 * but it's still possible that it's the one that's wrong.
 	 */
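The new superblock checks above share a style worth noting: each violated bound logs its own message and latches ret to -EINVAL rather than returning early, so one mount attempt reports every problem found. A user-space sketch of that accumulate-errors validation (the struct and bounds mirror the diff, but the code is ours):

    #include <errno.h>
    #include <stdio.h>

    struct super {
        unsigned int nodesize;
        unsigned int sectorsize;
        unsigned long long num_devices;
    };

    static int check_super(const struct super *sb)
    {
        int ret = 0;

        /* Keep going after a failure; latch the error instead. */
        if (sb->nodesize < 4096) {
            fprintf(stderr, "nodesize too small: %u < 4096\n",
                    sb->nodesize);
            ret = -EINVAL;
        }
        if (sb->sectorsize < 4096) {
            fprintf(stderr, "sectorsize too small: %u < 4096\n",
                    sb->sectorsize);
            ret = -EINVAL;
        }
        if (sb->num_devices == 0) {
            fprintf(stderr, "number of devices is 0\n");
            ret = -EINVAL;
        }
        return ret;
    }

    int main(void)
    {
        struct super bad = { 0, 0, 0 };   /* reports all three errors */
        return check_super(&bad) ? 1 : 0;
    }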
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 414651821fb3..27d44c0fd236 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -46,11 +46,11 @@ struct btrfs_fs_devices;
 
 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
 				      u64 parent_transid);
-void readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize);
-int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
+void readahead_tree_block(struct btrfs_root *root, u64 bytenr);
+int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
 			 int mirror_num, struct extent_buffer **eb);
 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
-						   u64 bytenr, u32 blocksize);
+						   u64 bytenr);
 void clean_tree_block(struct btrfs_trans_handle *trans,
 		      struct btrfs_root *root, struct extent_buffer *buf);
 int open_ctree(struct super_block *sb,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a684086c3c81..571f402d3fc4 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -74,8 +74,9 @@ enum {
 	RESERVE_ALLOC_NO_ACCOUNT = 2,
 };
 
-static int update_block_group(struct btrfs_root *root,
-			      u64 bytenr, u64 num_bytes, int alloc);
+static int update_block_group(struct btrfs_trans_handle *trans,
+			      struct btrfs_root *root, u64 bytenr,
+			      u64 num_bytes, int alloc);
 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root,
 			       u64 bytenr, u64 num_bytes, u64 parent,
@@ -1925,7 +1926,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 			 */
 			ret = 0;
 		}
-		kfree(bbio);
+		btrfs_put_bbio(bbio);
 	}
 
 	if (actual_bytes)
@@ -2768,7 +2769,6 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 	struct btrfs_delayed_ref_head *head;
 	int ret;
 	int run_all = count == (unsigned long)-1;
-	int run_most = 0;
 
 	/* We'll clean this up in btrfs_cleanup_transaction */
 	if (trans->aborted)
@@ -2778,10 +2778,8 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 		root = root->fs_info->tree_root;
 
 	delayed_refs = &trans->transaction->delayed_refs;
-	if (count == 0) {
+	if (count == 0)
 		count = atomic_read(&delayed_refs->num_entries) * 2;
-		run_most = 1;
-	}
 
 again:
 #ifdef SCRAMBLE_DELAYED_REFS
@@ -3315,120 +3313,42 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 				   struct btrfs_root *root)
 {
 	struct btrfs_block_group_cache *cache;
-	int err = 0;
+	struct btrfs_transaction *cur_trans = trans->transaction;
+	int ret = 0;
 	struct btrfs_path *path;
-	u64 last = 0;
+
+	if (list_empty(&cur_trans->dirty_bgs))
+		return 0;
 
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
 
-again:
-	while (1) {
-		cache = btrfs_lookup_first_block_group(root->fs_info, last);
-		while (cache) {
-			if (cache->disk_cache_state == BTRFS_DC_CLEAR)
-				break;
-			cache = next_block_group(root, cache);
-		}
-		if (!cache) {
-			if (last == 0)
-				break;
-			last = 0;
-			continue;
-		}
-		err = cache_save_setup(cache, trans, path);
-		last = cache->key.objectid + cache->key.offset;
-		btrfs_put_block_group(cache);
-	}
-
-	while (1) {
-		if (last == 0) {
-			err = btrfs_run_delayed_refs(trans, root,
-						     (unsigned long)-1);
-			if (err) /* File system offline */
-				goto out;
-		}
-
-		cache = btrfs_lookup_first_block_group(root->fs_info, last);
-		while (cache) {
-			if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
-				btrfs_put_block_group(cache);
-				goto again;
-			}
-
-			if (cache->dirty)
-				break;
-			cache = next_block_group(root, cache);
-		}
-		if (!cache) {
-			if (last == 0)
-				break;
-			last = 0;
-			continue;
-		}
-
-		if (cache->disk_cache_state == BTRFS_DC_SETUP)
-			cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
-		cache->dirty = 0;
-		last = cache->key.objectid + cache->key.offset;
-
-		err = write_one_cache_group(trans, root, path, cache);
-		btrfs_put_block_group(cache);
-		if (err) /* File system offline */
-			goto out;
-	}
-
-	while (1) {
-		/*
-		 * I don't think this is needed since we're just marking our
-		 * preallocated extent as written, but just in case it can't
-		 * hurt.
-		 */
-		if (last == 0) {
-			err = btrfs_run_delayed_refs(trans, root,
-						     (unsigned long)-1);
-			if (err) /* File system offline */
-				goto out;
-		}
-
-		cache = btrfs_lookup_first_block_group(root->fs_info, last);
-		while (cache) {
-			/*
-			 * Really this shouldn't happen, but it could if we
-			 * couldn't write the entire preallocated extent and
-			 * splitting the extent resulted in a new block.
-			 */
-			if (cache->dirty) {
-				btrfs_put_block_group(cache);
-				goto again;
-			}
-			if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
-				break;
-			cache = next_block_group(root, cache);
-		}
-		if (!cache) {
-			if (last == 0)
-				break;
-			last = 0;
-			continue;
-		}
-
-		err = btrfs_write_out_cache(root, trans, cache, path);
-
-		/*
-		 * If we didn't have an error then the cache state is still
-		 * NEED_WRITE, so we can set it to WRITTEN.
-		 */
-		if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
-			cache->disk_cache_state = BTRFS_DC_WRITTEN;
-		last = cache->key.objectid + cache->key.offset;
+	/*
+	 * We don't need the lock here since we are protected by the transaction
+	 * commit.  We want to do the cache_save_setup first and then run the
+	 * delayed refs to make sure we have the best chance at doing this all
+	 * in one shot.
+	 */
+	while (!list_empty(&cur_trans->dirty_bgs)) {
+		cache = list_first_entry(&cur_trans->dirty_bgs,
+					 struct btrfs_block_group_cache,
+					 dirty_list);
+		list_del_init(&cache->dirty_list);
+		if (cache->disk_cache_state == BTRFS_DC_CLEAR)
+			cache_save_setup(cache, trans, path);
+		if (!ret)
+			ret = btrfs_run_delayed_refs(trans, root,
+						     (unsigned long) -1);
+		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP)
+			btrfs_write_out_cache(root, trans, cache, path);
+		if (!ret)
+			ret = write_one_cache_group(trans, root, path, cache);
 		btrfs_put_block_group(cache);
 	}
-out:
 
 	btrfs_free_path(path);
-	return err;
+	return ret;
 }
 
 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
@@ -5043,19 +4963,25 @@ void btrfs_subvolume_release_metadata(struct btrfs_root *root,
 /**
  * drop_outstanding_extent - drop an outstanding extent
  * @inode: the inode we're dropping the extent for
+ * @num_bytes: the number of bytes we're relaseing.
  *
  * This is called when we are freeing up an outstanding extent, either called
  * after an error or after an extent is written. This will return the number of
  * reserved extents that need to be freed. This must be called with
 * BTRFS_I(inode)->lock held.
  */
-static unsigned drop_outstanding_extent(struct inode *inode)
+static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
 {
 	unsigned drop_inode_space = 0;
 	unsigned dropped_extents = 0;
+	unsigned num_extents = 0;
 
-	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
-	BTRFS_I(inode)->outstanding_extents--;
+	num_extents = (unsigned)div64_u64(num_bytes +
+					  BTRFS_MAX_EXTENT_SIZE - 1,
+					  BTRFS_MAX_EXTENT_SIZE);
+	ASSERT(num_extents);
+	ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
+	BTRFS_I(inode)->outstanding_extents -= num_extents;
 
 	if (BTRFS_I(inode)->outstanding_extents == 0 &&
 	    test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
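drop_outstanding_extent() now takes the byte count because a single release can span several maximum-sized extents; the number of outstanding extents to drop is the ceiling of num_bytes / BTRFS_MAX_EXTENT_SIZE, which is exactly what the div64_u64(num_bytes + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE) expression computes. A sketch of the same ceiling division:

    #include <stdio.h>

    #define MAX_EXTENT_SIZE (128ULL * 1024 * 1024)  /* 128M, as above */

    /* Ceiling division: how many max-sized extents cover num_bytes. */
    static unsigned int extents_for(unsigned long long num_bytes)
    {
        return (unsigned int)((num_bytes + MAX_EXTENT_SIZE - 1) /
                              MAX_EXTENT_SIZE);
    }

    int main(void)
    {
        printf("%u\n", extents_for(1));                   /* 1 */
        printf("%u\n", extents_for(MAX_EXTENT_SIZE));     /* 1 */
        printf("%u\n", extents_for(MAX_EXTENT_SIZE + 1)); /* 2 */
        return 0;
    }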
@@ -5226,7 +5152,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 
 out_fail:
 	spin_lock(&BTRFS_I(inode)->lock);
-	dropped = drop_outstanding_extent(inode);
+	dropped = drop_outstanding_extent(inode, num_bytes);
 	/*
 	 * If the inodes csum_bytes is the same as the original
 	 * csum_bytes then we know we haven't raced with any free()ers
@@ -5305,7 +5231,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
 
 	num_bytes = ALIGN(num_bytes, root->sectorsize);
 	spin_lock(&BTRFS_I(inode)->lock);
-	dropped = drop_outstanding_extent(inode);
+	dropped = drop_outstanding_extent(inode, num_bytes);
 
 	if (num_bytes)
 		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
@@ -5375,8 +5301,9 @@ void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
 	btrfs_free_reserved_data_space(inode, num_bytes);
 }
 
-static int update_block_group(struct btrfs_root *root,
-			      u64 bytenr, u64 num_bytes, int alloc)
+static int update_block_group(struct btrfs_trans_handle *trans,
+			      struct btrfs_root *root, u64 bytenr,
+			      u64 num_bytes, int alloc)
 {
 	struct btrfs_block_group_cache *cache = NULL;
 	struct btrfs_fs_info *info = root->fs_info;
@@ -5414,6 +5341,14 @@ static int update_block_group(struct btrfs_root *root,
 	if (!alloc && cache->cached == BTRFS_CACHE_NO)
 		cache_block_group(cache, 1);
 
+	spin_lock(&trans->transaction->dirty_bgs_lock);
+	if (list_empty(&cache->dirty_list)) {
+		list_add_tail(&cache->dirty_list,
+			      &trans->transaction->dirty_bgs);
+		btrfs_get_block_group(cache);
+	}
+	spin_unlock(&trans->transaction->dirty_bgs_lock);
+
 	byte_in_group = bytenr - cache->key.objectid;
 	WARN_ON(byte_in_group > cache->key.offset);
 
@@ -5424,7 +5359,6 @@ static int update_block_group(struct btrfs_root *root,
 	    cache->disk_cache_state < BTRFS_DC_CLEAR)
 		cache->disk_cache_state = BTRFS_DC_CLEAR;
 
-	cache->dirty = 1;
 	old_val = btrfs_block_group_used(&cache->item);
 	num_bytes = min(total, cache->key.offset - byte_in_group);
 	if (alloc) {
@@ -5807,10 +5741,13 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 		unpin = &fs_info->freed_extents[0];
 
 	while (1) {
+		mutex_lock(&fs_info->unused_bg_unpin_mutex);
 		ret = find_first_extent_bit(unpin, 0, &start, &end,
 					    EXTENT_DIRTY, NULL);
-		if (ret)
+		if (ret) {
+			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
 			break;
+		}
 
 		if (btrfs_test_opt(root, DISCARD))
 			ret = btrfs_discard_extent(root, start,
@@ -5818,6 +5755,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 
 		clear_extent_dirty(unpin, start, end, GFP_NOFS);
 		unpin_extent_range(root, start, end, true);
+		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
 		cond_resched();
 	}
 
@@ -6103,7 +6041,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 		}
 	}
 
-	ret = update_block_group(root, bytenr, num_bytes, 0);
+	ret = update_block_group(trans, root, bytenr, num_bytes, 0);
 	if (ret) {
 		btrfs_abort_transaction(trans, extent_root, ret);
 		goto out;
@@ -6205,7 +6143,6 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 			   struct extent_buffer *buf,
 			   u64 parent, int last_ref)
 {
-	struct btrfs_block_group_cache *cache = NULL;
 	int pin = 1;
 	int ret;
 
@@ -6221,17 +6158,20 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 	if (!last_ref)
 		return;
 
-	cache = btrfs_lookup_block_group(root->fs_info, buf->start);
-
 	if (btrfs_header_generation(buf) == trans->transid) {
+		struct btrfs_block_group_cache *cache;
+
 		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
 			ret = check_ref_cleanup(trans, root, buf->start);
 			if (!ret)
 				goto out;
 		}
 
+		cache = btrfs_lookup_block_group(root->fs_info, buf->start);
+
 		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
 			pin_down_extent(root, cache, buf->start, buf->len, 1);
+			btrfs_put_block_group(cache);
 			goto out;
 		}
 
@@ -6239,6 +6179,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 
 		btrfs_add_free_space(cache, buf->start, buf->len);
 		btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
+		btrfs_put_block_group(cache);
 		trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
 		pin = 0;
 	}
@@ -6253,7 +6194,6 @@ out:
 	 * anymore.
 	 */
 	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
-	btrfs_put_block_group(cache);
 }
 
 /* Can return -ENOMEM */
@@ -7063,7 +7003,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
 	if (ret)
 		return ret;
 
-	ret = update_block_group(root, ins->objectid, ins->offset, 1);
+	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
 	if (ret) { /* -ENOENT, logic error */
 		btrfs_err(fs_info, "update block group failed for %llu %llu",
 			ins->objectid, ins->offset);
@@ -7152,7 +7092,8 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
 		return ret;
 	}
 
-	ret = update_block_group(root, ins->objectid, root->nodesize, 1);
+	ret = update_block_group(trans, root, ins->objectid, root->nodesize,
+				 1);
 	if (ret) { /* -ENOENT, logic error */
 		btrfs_err(fs_info, "update block group failed for %llu %llu",
 			ins->objectid, ins->offset);
@@ -7217,11 +7158,11 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
 
 static struct extent_buffer *
 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-		      u64 bytenr, u32 blocksize, int level)
+		      u64 bytenr, int level)
 {
 	struct extent_buffer *buf;
 
-	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+	buf = btrfs_find_create_tree_block(root, bytenr);
7225 if (!buf) 7166 if (!buf)
7226 return ERR_PTR(-ENOMEM); 7167 return ERR_PTR(-ENOMEM);
7227 btrfs_set_header_generation(buf, trans->transid); 7168 btrfs_set_header_generation(buf, trans->transid);
@@ -7340,7 +7281,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7340 7281
7341 if (btrfs_test_is_dummy_root(root)) { 7282 if (btrfs_test_is_dummy_root(root)) {
7342 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr, 7283 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7343 blocksize, level); 7284 level);
7344 if (!IS_ERR(buf)) 7285 if (!IS_ERR(buf))
7345 root->alloc_bytenr += blocksize; 7286 root->alloc_bytenr += blocksize;
7346 return buf; 7287 return buf;
@@ -7357,8 +7298,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7357 return ERR_PTR(ret); 7298 return ERR_PTR(ret);
7358 } 7299 }
7359 7300
7360 buf = btrfs_init_new_buffer(trans, root, ins.objectid, 7301 buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
7361 blocksize, level);
7362 BUG_ON(IS_ERR(buf)); /* -ENOMEM */ 7302 BUG_ON(IS_ERR(buf)); /* -ENOMEM */
7363 7303
7364 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { 7304 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
@@ -7487,7 +7427,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7487 continue; 7427 continue;
7488 } 7428 }
7489reada: 7429reada:
7490 readahead_tree_block(root, bytenr, blocksize); 7430 readahead_tree_block(root, bytenr);
7491 nread++; 7431 nread++;
7492 } 7432 }
7493 wc->reada_slot = slot; 7433 wc->reada_slot = slot;
@@ -7828,7 +7768,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
7828 7768
7829 next = btrfs_find_tree_block(root, bytenr); 7769 next = btrfs_find_tree_block(root, bytenr);
7830 if (!next) { 7770 if (!next) {
7831 next = btrfs_find_create_tree_block(root, bytenr, blocksize); 7771 next = btrfs_find_create_tree_block(root, bytenr);
7832 if (!next) 7772 if (!next)
7833 return -ENOMEM; 7773 return -ENOMEM;
7834 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next, 7774 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
@@ -8548,14 +8488,6 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
8548 if (IS_ERR(trans)) 8488 if (IS_ERR(trans))
8549 return PTR_ERR(trans); 8489 return PTR_ERR(trans);
8550 8490
8551 alloc_flags = update_block_group_flags(root, cache->flags);
8552 if (alloc_flags != cache->flags) {
8553 ret = do_chunk_alloc(trans, root, alloc_flags,
8554 CHUNK_ALLOC_FORCE);
8555 if (ret < 0)
8556 goto out;
8557 }
8558
8559 ret = set_block_group_ro(cache, 0); 8491 ret = set_block_group_ro(cache, 0);
8560 if (!ret) 8492 if (!ret)
8561 goto out; 8493 goto out;
@@ -8566,6 +8498,11 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
8566 goto out; 8498 goto out;
8567 ret = set_block_group_ro(cache, 0); 8499 ret = set_block_group_ro(cache, 0);
8568out: 8500out:
8501 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
8502 alloc_flags = update_block_group_flags(root, cache->flags);
8503 check_system_chunk(trans, root, alloc_flags);
8504 }
8505
8569 btrfs_end_transaction(trans, root); 8506 btrfs_end_transaction(trans, root);
8570 return ret; 8507 return ret;
8571} 8508}
@@ -9005,6 +8942,7 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
9005 INIT_LIST_HEAD(&cache->cluster_list); 8942 INIT_LIST_HEAD(&cache->cluster_list);
9006 INIT_LIST_HEAD(&cache->bg_list); 8943 INIT_LIST_HEAD(&cache->bg_list);
9007 INIT_LIST_HEAD(&cache->ro_list); 8944 INIT_LIST_HEAD(&cache->ro_list);
8945 INIT_LIST_HEAD(&cache->dirty_list);
9008 btrfs_init_free_space_ctl(cache); 8946 btrfs_init_free_space_ctl(cache);
9009 atomic_set(&cache->trimming, 0); 8947 atomic_set(&cache->trimming, 0);
9010 8948
@@ -9068,9 +9006,8 @@ int btrfs_read_block_groups(struct btrfs_root *root)
9068 * b) Setting 'dirty flag' makes sure that we flush 9006 * b) Setting 'dirty flag' makes sure that we flush
9069 * the new space cache info onto disk. 9007 * the new space cache info onto disk.
9070 */ 9008 */
9071 cache->disk_cache_state = BTRFS_DC_CLEAR;
9072 if (btrfs_test_opt(root, SPACE_CACHE)) 9009 if (btrfs_test_opt(root, SPACE_CACHE))
9073 cache->dirty = 1; 9010 cache->disk_cache_state = BTRFS_DC_CLEAR;
9074 } 9011 }
9075 9012
9076 read_extent_buffer(leaf, &cache->item, 9013 read_extent_buffer(leaf, &cache->item,
@@ -9460,6 +9397,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9460 } 9397 }
9461 } 9398 }
9462 9399
9400 spin_lock(&trans->transaction->dirty_bgs_lock);
9401 if (!list_empty(&block_group->dirty_list)) {
9402 list_del_init(&block_group->dirty_list);
9403 btrfs_put_block_group(block_group);
9404 }
9405 spin_unlock(&trans->transaction->dirty_bgs_lock);
9406
9463 btrfs_remove_free_space_cache(block_group); 9407 btrfs_remove_free_space_cache(block_group);
9464 9408
9465 spin_lock(&block_group->space_info->lock); 9409 spin_lock(&block_group->space_info->lock);
@@ -9611,7 +9555,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9611 * Want to do this before we do anything else so we can recover 9555 * Want to do this before we do anything else so we can recover
9612 * properly if we fail to join the transaction. 9556 * properly if we fail to join the transaction.
9613 */ 9557 */
9614 trans = btrfs_join_transaction(root); 9558 /* 1 for btrfs_orphan_reserve_metadata() */
9559 trans = btrfs_start_transaction(root, 1);
9615 if (IS_ERR(trans)) { 9560 if (IS_ERR(trans)) {
9616 btrfs_set_block_group_rw(root, block_group); 9561 btrfs_set_block_group_rw(root, block_group);
9617 ret = PTR_ERR(trans); 9562 ret = PTR_ERR(trans);
@@ -9624,18 +9569,33 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9624 */ 9569 */
9625 start = block_group->key.objectid; 9570 start = block_group->key.objectid;
9626 end = start + block_group->key.offset - 1; 9571 end = start + block_group->key.offset - 1;
9572 /*
9573 * Hold the unused_bg_unpin_mutex lock to avoid racing with
9574 * btrfs_finish_extent_commit(). If we are at transaction N,
9575 * another task might be running finish_extent_commit() for the
9576 * previous transaction N - 1, and have seen a range belonging
9577 * to the block group in freed_extents[] before we were able to
9578 * clear the whole block group range from freed_extents[]. This
9579 * means that task can look up the block group after we
9580 * unpinned it from freed_extents[] and removed it, leading to
9581 * a BUG_ON() at btrfs_unpin_extent_range().
9582 */
9583 mutex_lock(&fs_info->unused_bg_unpin_mutex);
9627 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end, 9584 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
9628 EXTENT_DIRTY, GFP_NOFS); 9585 EXTENT_DIRTY, GFP_NOFS);
9629 if (ret) { 9586 if (ret) {
9587 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
9630 btrfs_set_block_group_rw(root, block_group); 9588 btrfs_set_block_group_rw(root, block_group);
9631 goto end_trans; 9589 goto end_trans;
9632 } 9590 }
9633 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end, 9591 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
9634 EXTENT_DIRTY, GFP_NOFS); 9592 EXTENT_DIRTY, GFP_NOFS);
9635 if (ret) { 9593 if (ret) {
9594 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
9636 btrfs_set_block_group_rw(root, block_group); 9595 btrfs_set_block_group_rw(root, block_group);
9637 goto end_trans; 9596 goto end_trans;
9638 } 9597 }
9598 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
9639 9599
9640 /* Reset pinned so btrfs_put_block_group doesn't complain */ 9600 /* Reset pinned so btrfs_put_block_group doesn't complain */
9641 block_group->pinned = 0; 9601 block_group->pinned = 0;
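
Both sides of the race described in the comment above now serialize on fs_info->unused_bg_unpin_mutex: btrfs_finish_extent_commit() holds it across each find/clear/unpin iteration, while btrfs_delete_unused_bgs() holds it while wiping the block group's whole range from both freed_extents[] trees. A compressed sketch of the two lock scopes, with a pthread mutex standing in for the kernel mutex and the bodies reduced to comments:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t unused_bg_unpin_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* btrfs_finish_extent_commit(): unpin one range per lock hold */
    static void finish_extent_commit_step(void)
    {
    	pthread_mutex_lock(&unused_bg_unpin_mutex);
    	/* find_first_extent_bit() + clear_extent_dirty() +
    	 * unpin_extent_range() happen here, atomically with
    	 * respect to block group deletion */
    	pthread_mutex_unlock(&unused_bg_unpin_mutex);
    }

    /* btrfs_delete_unused_bgs(): remove the whole range atomically */
    static void delete_unused_bg(void)
    {
    	pthread_mutex_lock(&unused_bg_unpin_mutex);
    	/* clear_extent_bits() on freed_extents[0] and [1] happens
    	 * here, so the unpin path can never see a half-removed
    	 * block group and BUG_ON() in btrfs_unpin_extent_range() */
    	pthread_mutex_unlock(&unused_bg_unpin_mutex);
    }

    int main(void)
    {
    	finish_extent_commit_step();
    	delete_unused_bg();
    	puts("lock scopes sketched; see the hunks above for the real code");
    	return 0;
    }
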
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c73df6a7c9b6..c7233ff1d533 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -64,7 +64,7 @@ void btrfs_leak_debug_check(void)
64 64
65 while (!list_empty(&states)) { 65 while (!list_empty(&states)) {
66 state = list_entry(states.next, struct extent_state, leak_list); 66 state = list_entry(states.next, struct extent_state, leak_list);
67 pr_err("BTRFS: state leak: start %llu end %llu state %lu in tree %d refs %d\n", 67 pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
68 state->start, state->end, state->state, 68 state->start, state->end, state->state,
69 extent_state_in_tree(state), 69 extent_state_in_tree(state),
70 atomic_read(&state->refs)); 70 atomic_read(&state->refs));
@@ -396,21 +396,21 @@ static void merge_state(struct extent_io_tree *tree,
396} 396}
397 397
398static void set_state_cb(struct extent_io_tree *tree, 398static void set_state_cb(struct extent_io_tree *tree,
399 struct extent_state *state, unsigned long *bits) 399 struct extent_state *state, unsigned *bits)
400{ 400{
401 if (tree->ops && tree->ops->set_bit_hook) 401 if (tree->ops && tree->ops->set_bit_hook)
402 tree->ops->set_bit_hook(tree->mapping->host, state, bits); 402 tree->ops->set_bit_hook(tree->mapping->host, state, bits);
403} 403}
404 404
405static void clear_state_cb(struct extent_io_tree *tree, 405static void clear_state_cb(struct extent_io_tree *tree,
406 struct extent_state *state, unsigned long *bits) 406 struct extent_state *state, unsigned *bits)
407{ 407{
408 if (tree->ops && tree->ops->clear_bit_hook) 408 if (tree->ops && tree->ops->clear_bit_hook)
409 tree->ops->clear_bit_hook(tree->mapping->host, state, bits); 409 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
410} 410}
411 411
412static void set_state_bits(struct extent_io_tree *tree, 412static void set_state_bits(struct extent_io_tree *tree,
413 struct extent_state *state, unsigned long *bits); 413 struct extent_state *state, unsigned *bits);
414 414
415/* 415/*
416 * insert an extent_state struct into the tree. 'bits' are set on the 416 * insert an extent_state struct into the tree. 'bits' are set on the
@@ -426,7 +426,7 @@ static int insert_state(struct extent_io_tree *tree,
426 struct extent_state *state, u64 start, u64 end, 426 struct extent_state *state, u64 start, u64 end,
427 struct rb_node ***p, 427 struct rb_node ***p,
428 struct rb_node **parent, 428 struct rb_node **parent,
429 unsigned long *bits) 429 unsigned *bits)
430{ 430{
431 struct rb_node *node; 431 struct rb_node *node;
432 432
@@ -511,10 +511,10 @@ static struct extent_state *next_state(struct extent_state *state)
511 */ 511 */
512static struct extent_state *clear_state_bit(struct extent_io_tree *tree, 512static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
513 struct extent_state *state, 513 struct extent_state *state,
514 unsigned long *bits, int wake) 514 unsigned *bits, int wake)
515{ 515{
516 struct extent_state *next; 516 struct extent_state *next;
517 unsigned long bits_to_clear = *bits & ~EXTENT_CTLBITS; 517 unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;
518 518
519 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) { 519 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
520 u64 range = state->end - state->start + 1; 520 u64 range = state->end - state->start + 1;
@@ -570,7 +570,7 @@ static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
570 * This takes the tree lock, and returns 0 on success and < 0 on error. 570 * This takes the tree lock, and returns 0 on success and < 0 on error.
571 */ 571 */
572int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 572int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
573 unsigned long bits, int wake, int delete, 573 unsigned bits, int wake, int delete,
574 struct extent_state **cached_state, 574 struct extent_state **cached_state,
575 gfp_t mask) 575 gfp_t mask)
576{ 576{
@@ -789,9 +789,9 @@ out:
789 789
790static void set_state_bits(struct extent_io_tree *tree, 790static void set_state_bits(struct extent_io_tree *tree,
791 struct extent_state *state, 791 struct extent_state *state,
792 unsigned long *bits) 792 unsigned *bits)
793{ 793{
794 unsigned long bits_to_set = *bits & ~EXTENT_CTLBITS; 794 unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;
795 795
796 set_state_cb(tree, state, bits); 796 set_state_cb(tree, state, bits);
797 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) { 797 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
@@ -803,7 +803,7 @@ static void set_state_bits(struct extent_io_tree *tree,
803 803
804static void cache_state_if_flags(struct extent_state *state, 804static void cache_state_if_flags(struct extent_state *state,
805 struct extent_state **cached_ptr, 805 struct extent_state **cached_ptr,
806 const u64 flags) 806 unsigned flags)
807{ 807{
808 if (cached_ptr && !(*cached_ptr)) { 808 if (cached_ptr && !(*cached_ptr)) {
809 if (!flags || (state->state & flags)) { 809 if (!flags || (state->state & flags)) {
@@ -833,7 +833,7 @@ static void cache_state(struct extent_state *state,
833 833
834static int __must_check 834static int __must_check
835__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 835__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
836 unsigned long bits, unsigned long exclusive_bits, 836 unsigned bits, unsigned exclusive_bits,
837 u64 *failed_start, struct extent_state **cached_state, 837 u64 *failed_start, struct extent_state **cached_state,
838 gfp_t mask) 838 gfp_t mask)
839{ 839{
@@ -1034,7 +1034,7 @@ search_again:
1034} 1034}
1035 1035
1036int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 1036int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1037 unsigned long bits, u64 * failed_start, 1037 unsigned bits, u64 * failed_start,
1038 struct extent_state **cached_state, gfp_t mask) 1038 struct extent_state **cached_state, gfp_t mask)
1039{ 1039{
1040 return __set_extent_bit(tree, start, end, bits, 0, failed_start, 1040 return __set_extent_bit(tree, start, end, bits, 0, failed_start,
@@ -1060,7 +1060,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1060 * boundary bits like LOCK. 1060 * boundary bits like LOCK.
1061 */ 1061 */
1062int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 1062int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1063 unsigned long bits, unsigned long clear_bits, 1063 unsigned bits, unsigned clear_bits,
1064 struct extent_state **cached_state, gfp_t mask) 1064 struct extent_state **cached_state, gfp_t mask)
1065{ 1065{
1066 struct extent_state *state; 1066 struct extent_state *state;
@@ -1268,14 +1268,14 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1268} 1268}
1269 1269
1270int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 1270int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1271 unsigned long bits, gfp_t mask) 1271 unsigned bits, gfp_t mask)
1272{ 1272{
1273 return set_extent_bit(tree, start, end, bits, NULL, 1273 return set_extent_bit(tree, start, end, bits, NULL,
1274 NULL, mask); 1274 NULL, mask);
1275} 1275}
1276 1276
1277int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 1277int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1278 unsigned long bits, gfp_t mask) 1278 unsigned bits, gfp_t mask)
1279{ 1279{
1280 return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask); 1280 return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
1281} 1281}
@@ -1330,10 +1330,11 @@ int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1330 * us if waiting is desired. 1330 * us if waiting is desired.
1331 */ 1331 */
1332int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 1332int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1333 unsigned long bits, struct extent_state **cached_state) 1333 unsigned bits, struct extent_state **cached_state)
1334{ 1334{
1335 int err; 1335 int err;
1336 u64 failed_start; 1336 u64 failed_start;
1337
1337 while (1) { 1338 while (1) {
1338 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits, 1339 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1339 EXTENT_LOCKED, &failed_start, 1340 EXTENT_LOCKED, &failed_start,
@@ -1440,7 +1441,7 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1440 */ 1441 */
1441static struct extent_state * 1442static struct extent_state *
1442find_first_extent_bit_state(struct extent_io_tree *tree, 1443find_first_extent_bit_state(struct extent_io_tree *tree,
1443 u64 start, unsigned long bits) 1444 u64 start, unsigned bits)
1444{ 1445{
1445 struct rb_node *node; 1446 struct rb_node *node;
1446 struct extent_state *state; 1447 struct extent_state *state;
@@ -1474,7 +1475,7 @@ out:
1474 * If nothing was found, 1 is returned. If found something, return 0. 1475 * If nothing was found, 1 is returned. If found something, return 0.
1475 */ 1476 */
1476int find_first_extent_bit(struct extent_io_tree *tree, u64 start, 1477int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1477 u64 *start_ret, u64 *end_ret, unsigned long bits, 1478 u64 *start_ret, u64 *end_ret, unsigned bits,
1478 struct extent_state **cached_state) 1479 struct extent_state **cached_state)
1479{ 1480{
1480 struct extent_state *state; 1481 struct extent_state *state;
@@ -1753,7 +1754,7 @@ out_failed:
1753 1754
1754int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, 1755int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1755 struct page *locked_page, 1756 struct page *locked_page,
1756 unsigned long clear_bits, 1757 unsigned clear_bits,
1757 unsigned long page_ops) 1758 unsigned long page_ops)
1758{ 1759{
1759 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; 1760 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
@@ -1810,7 +1811,7 @@ int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1810 */ 1811 */
1811u64 count_range_bits(struct extent_io_tree *tree, 1812u64 count_range_bits(struct extent_io_tree *tree,
1812 u64 *start, u64 search_end, u64 max_bytes, 1813 u64 *start, u64 search_end, u64 max_bytes,
1813 unsigned long bits, int contig) 1814 unsigned bits, int contig)
1814{ 1815{
1815 struct rb_node *node; 1816 struct rb_node *node;
1816 struct extent_state *state; 1817 struct extent_state *state;
@@ -1928,7 +1929,7 @@ out:
1928 * range is found set. 1929 * range is found set.
1929 */ 1930 */
1930int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, 1931int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1931 unsigned long bits, int filled, struct extent_state *cached) 1932 unsigned bits, int filled, struct extent_state *cached)
1932{ 1933{
1933 struct extent_state *state = NULL; 1934 struct extent_state *state = NULL;
1934 struct rb_node *node; 1935 struct rb_node *node;
@@ -2057,7 +2058,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
2057 sector = bbio->stripes[mirror_num-1].physical >> 9; 2058 sector = bbio->stripes[mirror_num-1].physical >> 9;
2058 bio->bi_iter.bi_sector = sector; 2059 bio->bi_iter.bi_sector = sector;
2059 dev = bbio->stripes[mirror_num-1].dev; 2060 dev = bbio->stripes[mirror_num-1].dev;
2060 kfree(bbio); 2061 btrfs_put_bbio(bbio);
2061 if (!dev || !dev->bdev || !dev->writeable) { 2062 if (!dev || !dev->bdev || !dev->writeable) {
2062 bio_put(bio); 2063 bio_put(bio);
2063 return -EIO; 2064 return -EIO;
@@ -2816,8 +2817,10 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
2816 bio_add_page(bio, page, page_size, offset) < page_size) { 2817 bio_add_page(bio, page, page_size, offset) < page_size) {
2817 ret = submit_one_bio(rw, bio, mirror_num, 2818 ret = submit_one_bio(rw, bio, mirror_num,
2818 prev_bio_flags); 2819 prev_bio_flags);
2819 if (ret < 0) 2820 if (ret < 0) {
2821 *bio_ret = NULL;
2820 return ret; 2822 return ret;
2823 }
2821 bio = NULL; 2824 bio = NULL;
2822 } else { 2825 } else {
2823 return 0; 2826 return 0;
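
This hunk closes a stale-pointer window in submit_extent_page(): submit_one_bio() consumes the bio (freeing it on error), but the caller's cached *bio_ret still pointed at it, so a later call could touch freed memory. Clearing *bio_ret on the error path removes that window. A generic illustration of the consume-and-clear pattern, not the btrfs API:

    #include <stdio.h>
    #include <stdlib.h>

    struct bio { int sector; };

    /* consumes the bio, like submit_one_bio(); frees it even on error */
    static int submit_one(struct bio *b, int fail)
    {
    	free(b);
    	return fail ? -5 : 0;
    }

    static int submit_page(struct bio **bio_ret, int fail)
    {
    	int ret = submit_one(*bio_ret, fail);

    	/* the fix: never leave the caller holding a freed bio */
    	*bio_ret = NULL;
    	return ret;
    }

    int main(void)
    {
    	struct bio *cached = malloc(sizeof(*cached));

    	if (submit_page(&cached, 1) < 0 && cached == NULL)
    		puts("error path cleared the cached bio pointer");
    	return 0;
    }
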
@@ -3239,7 +3242,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
3239 page, 3242 page,
3240 &delalloc_start, 3243 &delalloc_start,
3241 &delalloc_end, 3244 &delalloc_end,
3242 128 * 1024 * 1024); 3245 BTRFS_MAX_EXTENT_SIZE);
3243 if (nr_delalloc == 0) { 3246 if (nr_delalloc == 0) {
3244 delalloc_start = delalloc_end + 1; 3247 delalloc_start = delalloc_end + 1;
3245 continue; 3248 continue;
@@ -4598,11 +4601,11 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4598 4601
4599static struct extent_buffer * 4602static struct extent_buffer *
4600__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start, 4603__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
4601 unsigned long len, gfp_t mask) 4604 unsigned long len)
4602{ 4605{
4603 struct extent_buffer *eb = NULL; 4606 struct extent_buffer *eb = NULL;
4604 4607
4605 eb = kmem_cache_zalloc(extent_buffer_cache, mask); 4608 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS);
4606 if (eb == NULL) 4609 if (eb == NULL)
4607 return NULL; 4610 return NULL;
4608 eb->start = start; 4611 eb->start = start;
@@ -4643,7 +4646,7 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4643 struct extent_buffer *new; 4646 struct extent_buffer *new;
4644 unsigned long num_pages = num_extent_pages(src->start, src->len); 4647 unsigned long num_pages = num_extent_pages(src->start, src->len);
4645 4648
4646 new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_NOFS); 4649 new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
4647 if (new == NULL) 4650 if (new == NULL)
4648 return NULL; 4651 return NULL;
4649 4652
@@ -4666,13 +4669,26 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4666 return new; 4669 return new;
4667} 4670}
4668 4671
4669struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len) 4672struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4673 u64 start)
4670{ 4674{
4671 struct extent_buffer *eb; 4675 struct extent_buffer *eb;
4672 unsigned long num_pages = num_extent_pages(0, len); 4676 unsigned long len;
4677 unsigned long num_pages;
4673 unsigned long i; 4678 unsigned long i;
4674 4679
4675 eb = __alloc_extent_buffer(NULL, start, len, GFP_NOFS); 4680 if (!fs_info) {
4681 /*
4682 * Called only from tests that don't always have a fs_info
4683 * available, but we know that nodesize is 4096
4684 */
4685 len = 4096;
4686 } else {
4687 len = fs_info->tree_root->nodesize;
4688 }
4689 num_pages = num_extent_pages(0, len);
4690
4691 eb = __alloc_extent_buffer(fs_info, start, len);
4676 if (!eb) 4692 if (!eb)
4677 return NULL; 4693 return NULL;
4678 4694
@@ -4762,7 +4778,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
4762 4778
4763#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 4779#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4764struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, 4780struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
4765 u64 start, unsigned long len) 4781 u64 start)
4766{ 4782{
4767 struct extent_buffer *eb, *exists = NULL; 4783 struct extent_buffer *eb, *exists = NULL;
4768 int ret; 4784 int ret;
@@ -4770,7 +4786,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
4770 eb = find_extent_buffer(fs_info, start); 4786 eb = find_extent_buffer(fs_info, start);
4771 if (eb) 4787 if (eb)
4772 return eb; 4788 return eb;
4773 eb = alloc_dummy_extent_buffer(start, len); 4789 eb = alloc_dummy_extent_buffer(fs_info, start);
4774 if (!eb) 4790 if (!eb)
4775 return NULL; 4791 return NULL;
4776 eb->fs_info = fs_info; 4792 eb->fs_info = fs_info;
@@ -4808,8 +4824,9 @@ free_eb:
4808#endif 4824#endif
4809 4825
4810struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, 4826struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
4811 u64 start, unsigned long len) 4827 u64 start)
4812{ 4828{
4829 unsigned long len = fs_info->tree_root->nodesize;
4813 unsigned long num_pages = num_extent_pages(start, len); 4830 unsigned long num_pages = num_extent_pages(start, len);
4814 unsigned long i; 4831 unsigned long i;
4815 unsigned long index = start >> PAGE_CACHE_SHIFT; 4832 unsigned long index = start >> PAGE_CACHE_SHIFT;
@@ -4824,7 +4841,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
4824 if (eb) 4841 if (eb)
4825 return eb; 4842 return eb;
4826 4843
4827 eb = __alloc_extent_buffer(fs_info, start, len, GFP_NOFS); 4844 eb = __alloc_extent_buffer(fs_info, start, len);
4828 if (!eb) 4845 if (!eb)
4829 return NULL; 4846 return NULL;
4830 4847
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index ece9ce87edff..695b0ccfb755 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -4,22 +4,22 @@
4#include <linux/rbtree.h> 4#include <linux/rbtree.h>
5 5
6/* bits for the extent state */ 6/* bits for the extent state */
7#define EXTENT_DIRTY 1 7#define EXTENT_DIRTY (1U << 0)
8#define EXTENT_WRITEBACK (1 << 1) 8#define EXTENT_WRITEBACK (1U << 1)
9#define EXTENT_UPTODATE (1 << 2) 9#define EXTENT_UPTODATE (1U << 2)
10#define EXTENT_LOCKED (1 << 3) 10#define EXTENT_LOCKED (1U << 3)
11#define EXTENT_NEW (1 << 4) 11#define EXTENT_NEW (1U << 4)
12#define EXTENT_DELALLOC (1 << 5) 12#define EXTENT_DELALLOC (1U << 5)
13#define EXTENT_DEFRAG (1 << 6) 13#define EXTENT_DEFRAG (1U << 6)
14#define EXTENT_BOUNDARY (1 << 9) 14#define EXTENT_BOUNDARY (1U << 9)
15#define EXTENT_NODATASUM (1 << 10) 15#define EXTENT_NODATASUM (1U << 10)
16#define EXTENT_DO_ACCOUNTING (1 << 11) 16#define EXTENT_DO_ACCOUNTING (1U << 11)
17#define EXTENT_FIRST_DELALLOC (1 << 12) 17#define EXTENT_FIRST_DELALLOC (1U << 12)
18#define EXTENT_NEED_WAIT (1 << 13) 18#define EXTENT_NEED_WAIT (1U << 13)
19#define EXTENT_DAMAGED (1 << 14) 19#define EXTENT_DAMAGED (1U << 14)
20#define EXTENT_NORESERVE (1 << 15) 20#define EXTENT_NORESERVE (1U << 15)
21#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK) 21#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
22#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC) 22#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)
23 23
24/* 24/*
25 * flags for bio submission. The high bits indicate the compression 25 * flags for bio submission. The high bits indicate the compression
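
The flag respelling above pairs with the type change running through this patch: the extent-state word shrinks from `unsigned long` to `unsigned`, so each mask is written as `1U << n` to make it an unsigned 32-bit constant. The highest bit in use is 15, so the narrower word loses nothing. A tiny sanity sketch using values copied from the hunk:

    #include <stdio.h>

    #define EXTENT_DIRTY     (1U << 0)
    #define EXTENT_LOCKED    (1U << 3)
    #define EXTENT_NORESERVE (1U << 15)   /* highest bit in use */

    int main(void)
    {
    	unsigned state = EXTENT_DIRTY | EXTENT_LOCKED;

    	/* all flags fit in 16 bits, comfortably inside a 32-bit word */
    	printf("state=0x%04x dirty=%d locked=%d noreserve=%d\n",
    	       state,
    	       !!(state & EXTENT_DIRTY),
    	       !!(state & EXTENT_LOCKED),
    	       !!(state & EXTENT_NORESERVE));
    	return 0;
    }
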
@@ -81,9 +81,9 @@ struct extent_io_ops {
81 int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, 81 int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
82 struct extent_state *state, int uptodate); 82 struct extent_state *state, int uptodate);
83 void (*set_bit_hook)(struct inode *inode, struct extent_state *state, 83 void (*set_bit_hook)(struct inode *inode, struct extent_state *state,
84 unsigned long *bits); 84 unsigned *bits);
85 void (*clear_bit_hook)(struct inode *inode, struct extent_state *state, 85 void (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
86 unsigned long *bits); 86 unsigned *bits);
87 void (*merge_extent_hook)(struct inode *inode, 87 void (*merge_extent_hook)(struct inode *inode,
88 struct extent_state *new, 88 struct extent_state *new,
89 struct extent_state *other); 89 struct extent_state *other);
@@ -108,7 +108,7 @@ struct extent_state {
108 /* ADD NEW ELEMENTS AFTER THIS */ 108 /* ADD NEW ELEMENTS AFTER THIS */
109 wait_queue_head_t wq; 109 wait_queue_head_t wq;
110 atomic_t refs; 110 atomic_t refs;
111 unsigned long state; 111 unsigned state;
112 112
113 /* for use by the FS */ 113 /* for use by the FS */
114 u64 private; 114 u64 private;
@@ -188,7 +188,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
188int try_release_extent_buffer(struct page *page); 188int try_release_extent_buffer(struct page *page);
189int lock_extent(struct extent_io_tree *tree, u64 start, u64 end); 189int lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
190int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 190int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
191 unsigned long bits, struct extent_state **cached); 191 unsigned bits, struct extent_state **cached);
192int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end); 192int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end);
193int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, 193int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
194 struct extent_state **cached, gfp_t mask); 194 struct extent_state **cached, gfp_t mask);
@@ -202,21 +202,21 @@ void extent_io_exit(void);
202 202
203u64 count_range_bits(struct extent_io_tree *tree, 203u64 count_range_bits(struct extent_io_tree *tree,
204 u64 *start, u64 search_end, 204 u64 *start, u64 search_end,
205 u64 max_bytes, unsigned long bits, int contig); 205 u64 max_bytes, unsigned bits, int contig);
206 206
207void free_extent_state(struct extent_state *state); 207void free_extent_state(struct extent_state *state);
208int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, 208int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
209 unsigned long bits, int filled, 209 unsigned bits, int filled,
210 struct extent_state *cached_state); 210 struct extent_state *cached_state);
211int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 211int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
212 unsigned long bits, gfp_t mask); 212 unsigned bits, gfp_t mask);
213int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 213int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
214 unsigned long bits, int wake, int delete, 214 unsigned bits, int wake, int delete,
215 struct extent_state **cached, gfp_t mask); 215 struct extent_state **cached, gfp_t mask);
216int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 216int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
217 unsigned long bits, gfp_t mask); 217 unsigned bits, gfp_t mask);
218int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 218int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
219 unsigned long bits, u64 *failed_start, 219 unsigned bits, u64 *failed_start,
220 struct extent_state **cached_state, gfp_t mask); 220 struct extent_state **cached_state, gfp_t mask);
221int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, 221int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
222 struct extent_state **cached_state, gfp_t mask); 222 struct extent_state **cached_state, gfp_t mask);
@@ -229,14 +229,14 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
229int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, 229int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
230 gfp_t mask); 230 gfp_t mask);
231int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 231int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
232 unsigned long bits, unsigned long clear_bits, 232 unsigned bits, unsigned clear_bits,
233 struct extent_state **cached_state, gfp_t mask); 233 struct extent_state **cached_state, gfp_t mask);
234int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, 234int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
235 struct extent_state **cached_state, gfp_t mask); 235 struct extent_state **cached_state, gfp_t mask);
236int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end, 236int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
237 struct extent_state **cached_state, gfp_t mask); 237 struct extent_state **cached_state, gfp_t mask);
238int find_first_extent_bit(struct extent_io_tree *tree, u64 start, 238int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
239 u64 *start_ret, u64 *end_ret, unsigned long bits, 239 u64 *start_ret, u64 *end_ret, unsigned bits,
240 struct extent_state **cached_state); 240 struct extent_state **cached_state);
241int extent_invalidatepage(struct extent_io_tree *tree, 241int extent_invalidatepage(struct extent_io_tree *tree,
242 struct page *page, unsigned long offset); 242 struct page *page, unsigned long offset);
@@ -262,8 +262,9 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
262void set_page_extent_mapped(struct page *page); 262void set_page_extent_mapped(struct page *page);
263 263
264struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, 264struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
265 u64 start, unsigned long len); 265 u64 start);
266struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len); 266struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
267 u64 start);
267struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src); 268struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
268struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, 269struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
269 u64 start); 270 u64 start);
@@ -322,7 +323,7 @@ int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
322int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end); 323int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
323int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, 324int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
324 struct page *locked_page, 325 struct page *locked_page,
325 unsigned long bits_to_clear, 326 unsigned bits_to_clear,
326 unsigned long page_ops); 327 unsigned long page_ops);
327struct bio * 328struct bio *
328btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, 329btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
@@ -377,5 +378,5 @@ noinline u64 find_lock_delalloc_range(struct inode *inode,
377 u64 *end, u64 max_bytes); 378 u64 *end, u64 max_bytes);
378#endif 379#endif
379struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, 380struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
380 u64 start, unsigned long len); 381 u64 start);
381#endif 382#endif
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index d6c03f7f136b..a71978578fa7 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -651,15 +651,13 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
651 struct io_ctl io_ctl; 651 struct io_ctl io_ctl;
652 struct btrfs_key key; 652 struct btrfs_key key;
653 struct btrfs_free_space *e, *n; 653 struct btrfs_free_space *e, *n;
654 struct list_head bitmaps; 654 LIST_HEAD(bitmaps);
655 u64 num_entries; 655 u64 num_entries;
656 u64 num_bitmaps; 656 u64 num_bitmaps;
657 u64 generation; 657 u64 generation;
658 u8 type; 658 u8 type;
659 int ret = 0; 659 int ret = 0;
660 660
661 INIT_LIST_HEAD(&bitmaps);
662
663 /* Nothing in the space cache, goodbye */ 661 /* Nothing in the space cache, goodbye */
664 if (!i_size_read(inode)) 662 if (!i_size_read(inode))
665 return 0; 663 return 0;
@@ -1243,6 +1241,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
1243 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 1241 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1244 struct inode *inode; 1242 struct inode *inode;
1245 int ret = 0; 1243 int ret = 0;
1244 enum btrfs_disk_cache_state dcs = BTRFS_DC_WRITTEN;
1246 1245
1247 root = root->fs_info->tree_root; 1246 root = root->fs_info->tree_root;
1248 1247
@@ -1266,9 +1265,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
1266 ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans, 1265 ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
1267 path, block_group->key.objectid); 1266 path, block_group->key.objectid);
1268 if (ret) { 1267 if (ret) {
1269 spin_lock(&block_group->lock); 1268 dcs = BTRFS_DC_ERROR;
1270 block_group->disk_cache_state = BTRFS_DC_ERROR;
1271 spin_unlock(&block_group->lock);
1272 ret = 0; 1269 ret = 0;
1273#ifdef DEBUG 1270#ifdef DEBUG
1274 btrfs_err(root->fs_info, 1271 btrfs_err(root->fs_info,
@@ -1277,6 +1274,9 @@ int btrfs_write_out_cache(struct btrfs_root *root,
1277#endif 1274#endif
1278 } 1275 }
1279 1276
1277 spin_lock(&block_group->lock);
1278 block_group->disk_cache_state = dcs;
1279 spin_unlock(&block_group->lock);
1280 iput(inode); 1280 iput(inode);
1281 return ret; 1281 return ret;
1282} 1282}
@@ -2903,7 +2903,6 @@ int btrfs_find_space_cluster(struct btrfs_root *root,
2903 trace_btrfs_find_cluster(block_group, offset, bytes, empty_size, 2903 trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
2904 min_bytes); 2904 min_bytes);
2905 2905
2906 INIT_LIST_HEAD(&bitmaps);
2907 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, 2906 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
2908 bytes + empty_size, 2907 bytes + empty_size,
2909 cont1_bytes, min_bytes); 2908 cont1_bytes, min_bytes);
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index 8ffa4783cbf4..265e03c73f4d 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -344,6 +344,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
344 return -ENOMEM; 344 return -ENOMEM;
345 345
346 path->leave_spinning = 1; 346 path->leave_spinning = 1;
347 path->skip_release_on_error = 1;
347 ret = btrfs_insert_empty_item(trans, root, path, &key, 348 ret = btrfs_insert_empty_item(trans, root, path, &key,
348 ins_len); 349 ins_len);
349 if (ret == -EEXIST) { 350 if (ret == -EEXIST) {
@@ -362,8 +363,12 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
362 ptr = (unsigned long)(ref + 1); 363 ptr = (unsigned long)(ref + 1);
363 ret = 0; 364 ret = 0;
364 } else if (ret < 0) { 365 } else if (ret < 0) {
365 if (ret == -EOVERFLOW) 366 if (ret == -EOVERFLOW) {
366 ret = -EMLINK; 367 if (find_name_in_backref(path, name, name_len, &ref))
368 ret = -EEXIST;
369 else
370 ret = -EMLINK;
371 }
367 goto out; 372 goto out;
368 } else { 373 } else {
369 ref = btrfs_item_ptr(path->nodes[0], path->slots[0], 374 ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 54bcf639d1cf..a85c23dfcddb 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1530,10 +1530,45 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1530static void btrfs_split_extent_hook(struct inode *inode, 1530static void btrfs_split_extent_hook(struct inode *inode,
1531 struct extent_state *orig, u64 split) 1531 struct extent_state *orig, u64 split)
1532{ 1532{
1533 u64 size;
1534
1533 /* not delalloc, ignore it */ 1535 /* not delalloc, ignore it */
1534 if (!(orig->state & EXTENT_DELALLOC)) 1536 if (!(orig->state & EXTENT_DELALLOC))
1535 return; 1537 return;
1536 1538
1539 size = orig->end - orig->start + 1;
1540 if (size > BTRFS_MAX_EXTENT_SIZE) {
1541 u64 num_extents;
1542 u64 new_size;
1543
1544 /*
1545 * We need the size of the larger remaining piece to see if we
1546 * need to add a new outstanding extent. Think of the following
1547 * case
1548 *
1549 * [MAX_EXTENT_SIZEx2 - 4k][4k]
1550 *
1551 * The new_size would just be 4k and we'd think we had enough
1552 * outstanding extents for this if we only took one side of the
1553 * split; the same goes for the other direction. We need to see
1554 * if the larger size still takes the same number of extents as
1555 * the original size, because if it does we need to add a new
1556 * outstanding extent. But if we split up and the larger size
1557 * is less than the original then we are good to go since we've
1558 * already accounted for the extra extent in our original
1559 * accounting.
1560 */
1561 new_size = orig->end - split + 1;
1562 if ((split - orig->start) > new_size)
1563 new_size = split - orig->start;
1564
1565 num_extents = div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1,
1566 BTRFS_MAX_EXTENT_SIZE);
1567 if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1568 BTRFS_MAX_EXTENT_SIZE) < num_extents)
1569 return;
1570 }
1571
1537 spin_lock(&BTRFS_I(inode)->lock); 1572 spin_lock(&BTRFS_I(inode)->lock);
1538 BTRFS_I(inode)->outstanding_extents++; 1573 BTRFS_I(inode)->outstanding_extents++;
1539 spin_unlock(&BTRFS_I(inode)->lock); 1574 spin_unlock(&BTRFS_I(inode)->lock);
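
Worked example of the comment above, assuming BTRFS_MAX_EXTENT_SIZE is the 128 MiB constant this series introduces (see the writepage_delalloc hunk earlier): splitting a (2 * MAX - 4k) range as [2 * MAX - 8k][4k] leaves the larger half still needing two max-sized extents, the same as the whole range, so one outstanding extent must be added; had the larger half dropped to one extent's worth, the original accounting would already cover it. A standalone sketch of the ceiling arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_EXTENT (128ULL * 1024 * 1024)  /* assumed BTRFS_MAX_EXTENT_SIZE */

    /* same ceiling division the hook performs with div64_u64() */
    static uint64_t nr_extents(uint64_t bytes)
    {
    	return (bytes + MAX_EXTENT - 1) / MAX_EXTENT;
    }

    int main(void)
    {
    	uint64_t size   = 2 * MAX_EXTENT - 4096;  /* range being split */
    	uint64_t right  = 4096;                   /* [size - 4k][4k]   */
    	uint64_t left   = size - right;
    	uint64_t larger = left > right ? left : right;

    	if (nr_extents(larger) < nr_extents(size))
    		puts("larger half shrank: original accounting suffices");
    	else
    		puts("larger half unchanged: add one outstanding extent");
    	return 0;
    }
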
@@ -1549,10 +1584,34 @@ static void btrfs_merge_extent_hook(struct inode *inode,
1549 struct extent_state *new, 1584 struct extent_state *new,
1550 struct extent_state *other) 1585 struct extent_state *other)
1551{ 1586{
1587 u64 new_size, old_size;
1588 u64 num_extents;
1589
1552 /* not delalloc, ignore it */ 1590 /* not delalloc, ignore it */
1553 if (!(other->state & EXTENT_DELALLOC)) 1591 if (!(other->state & EXTENT_DELALLOC))
1554 return; 1592 return;
1555 1593
1594 old_size = other->end - other->start + 1;
1595 new_size = old_size + (new->end - new->start + 1);
1596
1597 /* we're not bigger than the max, unreserve the space and go */
1598 if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
1599 spin_lock(&BTRFS_I(inode)->lock);
1600 BTRFS_I(inode)->outstanding_extents--;
1601 spin_unlock(&BTRFS_I(inode)->lock);
1602 return;
1603 }
1604
1605 /*
1606 * If we grew by another max_extent, just return, we want to keep that
1607 * reserved amount.
1608 */
1609 num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
1610 BTRFS_MAX_EXTENT_SIZE);
1611 if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1612 BTRFS_MAX_EXTENT_SIZE) > num_extents)
1613 return;
1614
1556 spin_lock(&BTRFS_I(inode)->lock); 1615 spin_lock(&BTRFS_I(inode)->lock);
1557 BTRFS_I(inode)->outstanding_extents--; 1616 BTRFS_I(inode)->outstanding_extents--;
1558 spin_unlock(&BTRFS_I(inode)->lock); 1617 spin_unlock(&BTRFS_I(inode)->lock);
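
The merge hook applies the same arithmetic in reverse: merging two delalloc pieces drops an outstanding extent only when the combined size still fits the existing extent count; if the merge crosses a BTRFS_MAX_EXTENT_SIZE boundary, the extra reservation is still needed and the hook returns without decrementing. Continuing the sketch above, with the same assumed 128 MiB constant:

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_EXTENT (128ULL * 1024 * 1024)  /* assumed BTRFS_MAX_EXTENT_SIZE */

    static uint64_t nr_extents(uint64_t bytes)
    {
    	return (bytes + MAX_EXTENT - 1) / MAX_EXTENT;
    }

    int main(void)
    {
    	uint64_t old_size = MAX_EXTENT;        /* existing delalloc extent */
    	uint64_t new_size = old_size + 4096;   /* after merging a 4k piece */

    	if (new_size <= MAX_EXTENT)
    		puts("still one extent: drop a reservation");
    	else if (nr_extents(new_size) > nr_extents(old_size))
    		puts("crossed a max-extent boundary: keep the reservation");
    	else
    		puts("extent count unchanged: drop a reservation");
    	return 0;
    }
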
@@ -1604,7 +1663,7 @@ static void btrfs_del_delalloc_inode(struct btrfs_root *root,
1604 * have pending delalloc work to be done. 1663 * have pending delalloc work to be done.
1605 */ 1664 */
1606static void btrfs_set_bit_hook(struct inode *inode, 1665static void btrfs_set_bit_hook(struct inode *inode,
1607 struct extent_state *state, unsigned long *bits) 1666 struct extent_state *state, unsigned *bits)
1608{ 1667{
1609 1668
1610 if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC)) 1669 if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
@@ -1645,9 +1704,11 @@ static void btrfs_set_bit_hook(struct inode *inode,
1645 */ 1704 */
1646static void btrfs_clear_bit_hook(struct inode *inode, 1705static void btrfs_clear_bit_hook(struct inode *inode,
1647 struct extent_state *state, 1706 struct extent_state *state,
1648 unsigned long *bits) 1707 unsigned *bits)
1649{ 1708{
1650 u64 len = state->end + 1 - state->start; 1709 u64 len = state->end + 1 - state->start;
1710 u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE -1,
1711 BTRFS_MAX_EXTENT_SIZE);
1651 1712
1652 spin_lock(&BTRFS_I(inode)->lock); 1713 spin_lock(&BTRFS_I(inode)->lock);
1653 if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) 1714 if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG))
@@ -1667,7 +1728,7 @@ static void btrfs_clear_bit_hook(struct inode *inode,
1667 *bits &= ~EXTENT_FIRST_DELALLOC; 1728 *bits &= ~EXTENT_FIRST_DELALLOC;
1668 } else if (!(*bits & EXTENT_DO_ACCOUNTING)) { 1729 } else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
1669 spin_lock(&BTRFS_I(inode)->lock); 1730 spin_lock(&BTRFS_I(inode)->lock);
1670 BTRFS_I(inode)->outstanding_extents--; 1731 BTRFS_I(inode)->outstanding_extents -= num_extents;
1671 spin_unlock(&BTRFS_I(inode)->lock); 1732 spin_unlock(&BTRFS_I(inode)->lock);
1672 } 1733 }
1673 1734
@@ -2945,7 +3006,7 @@ static int __readpage_endio_check(struct inode *inode,
2945 return 0; 3006 return 0;
2946zeroit: 3007zeroit:
2947 if (__ratelimit(&_rs)) 3008 if (__ratelimit(&_rs))
2948 btrfs_info(BTRFS_I(inode)->root->fs_info, 3009 btrfs_warn(BTRFS_I(inode)->root->fs_info,
2949 "csum failed ino %llu off %llu csum %u expected csum %u", 3010 "csum failed ino %llu off %llu csum %u expected csum %u",
2950 btrfs_ino(inode), start, csum, csum_expected); 3011 btrfs_ino(inode), start, csum, csum_expected);
2951 memset(kaddr + pgoff, 1, len); 3012 memset(kaddr + pgoff, 1, len);
@@ -3407,7 +3468,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
3407 3468
3408out: 3469out:
3409 if (ret) 3470 if (ret)
3410 btrfs_crit(root->fs_info, 3471 btrfs_err(root->fs_info,
3411 "could not do orphan cleanup %d", ret); 3472 "could not do orphan cleanup %d", ret);
3412 btrfs_free_path(path); 3473 btrfs_free_path(path);
3413 return ret; 3474 return ret;
@@ -3490,7 +3551,6 @@ static void btrfs_read_locked_inode(struct inode *inode)
3490 struct btrfs_path *path; 3551 struct btrfs_path *path;
3491 struct extent_buffer *leaf; 3552 struct extent_buffer *leaf;
3492 struct btrfs_inode_item *inode_item; 3553 struct btrfs_inode_item *inode_item;
3493 struct btrfs_timespec *tspec;
3494 struct btrfs_root *root = BTRFS_I(inode)->root; 3554 struct btrfs_root *root = BTRFS_I(inode)->root;
3495 struct btrfs_key location; 3555 struct btrfs_key location;
3496 unsigned long ptr; 3556 unsigned long ptr;
@@ -3527,17 +3587,19 @@ static void btrfs_read_locked_inode(struct inode *inode)
3527 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); 3587 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3528 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); 3588 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
3529 3589
3530 tspec = btrfs_inode_atime(inode_item); 3590 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
3531 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec); 3591 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
3532 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); 3592
3593 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
3594 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
3533 3595
3534 tspec = btrfs_inode_mtime(inode_item); 3596 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
3535 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec); 3597 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3536 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3537 3598
3538 tspec = btrfs_inode_ctime(inode_item); 3599 BTRFS_I(inode)->i_otime.tv_sec =
3539 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec); 3600 btrfs_timespec_sec(leaf, &inode_item->otime);
3540 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); 3601 BTRFS_I(inode)->i_otime.tv_nsec =
3602 btrfs_timespec_nsec(leaf, &inode_item->otime);
3541 3603
3542 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); 3604 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3543 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); 3605 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
@@ -3656,21 +3718,26 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
3656 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); 3718 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3657 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); 3719 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3658 3720
3659 btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item), 3721 btrfs_set_token_timespec_sec(leaf, &item->atime,
3660 inode->i_atime.tv_sec, &token); 3722 inode->i_atime.tv_sec, &token);
3661 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item), 3723 btrfs_set_token_timespec_nsec(leaf, &item->atime,
3662 inode->i_atime.tv_nsec, &token); 3724 inode->i_atime.tv_nsec, &token);
3663 3725
3664 btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item), 3726 btrfs_set_token_timespec_sec(leaf, &item->mtime,
3665 inode->i_mtime.tv_sec, &token); 3727 inode->i_mtime.tv_sec, &token);
3666 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item), 3728 btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3667 inode->i_mtime.tv_nsec, &token); 3729 inode->i_mtime.tv_nsec, &token);
3668 3730
3669 btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item), 3731 btrfs_set_token_timespec_sec(leaf, &item->ctime,
3670 inode->i_ctime.tv_sec, &token); 3732 inode->i_ctime.tv_sec, &token);
3671 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item), 3733 btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3672 inode->i_ctime.tv_nsec, &token); 3734 inode->i_ctime.tv_nsec, &token);
3673 3735
3736 btrfs_set_token_timespec_sec(leaf, &item->otime,
3737 BTRFS_I(inode)->i_otime.tv_sec, &token);
3738 btrfs_set_token_timespec_nsec(leaf, &item->otime,
3739 BTRFS_I(inode)->i_otime.tv_nsec, &token);
3740
3674 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), 3741 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3675 &token); 3742 &token);
3676 btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation, 3743 btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
@@ -5007,6 +5074,7 @@ static int fixup_tree_root_location(struct btrfs_root *root,
5007 struct btrfs_root *new_root; 5074 struct btrfs_root *new_root;
5008 struct btrfs_root_ref *ref; 5075 struct btrfs_root_ref *ref;
5009 struct extent_buffer *leaf; 5076 struct extent_buffer *leaf;
5077 struct btrfs_key key;
5010 int ret; 5078 int ret;
5011 int err = 0; 5079 int err = 0;
5012 5080
@@ -5017,9 +5085,12 @@ static int fixup_tree_root_location(struct btrfs_root *root,
5017 } 5085 }
5018 5086
5019 err = -ENOENT; 5087 err = -ENOENT;
5020 ret = btrfs_find_item(root->fs_info->tree_root, path, 5088 key.objectid = BTRFS_I(dir)->root->root_key.objectid;
5021 BTRFS_I(dir)->root->root_key.objectid, 5089 key.type = BTRFS_ROOT_REF_KEY;
5022 location->objectid, BTRFS_ROOT_REF_KEY, NULL); 5090 key.offset = location->objectid;
5091
5092 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path,
5093 0, 0);
5023 if (ret) { 5094 if (ret) {
5024 if (ret < 0) 5095 if (ret < 0)
5025 err = ret; 5096 err = ret;
@@ -5258,7 +5329,10 @@ static struct inode *new_simple_dir(struct super_block *s,
5258 inode->i_op = &btrfs_dir_ro_inode_operations; 5329 inode->i_op = &btrfs_dir_ro_inode_operations;
5259 inode->i_fop = &simple_dir_operations; 5330 inode->i_fop = &simple_dir_operations;
5260 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 5331 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5261 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 5332 inode->i_mtime = CURRENT_TIME;
5333 inode->i_atime = inode->i_mtime;
5334 inode->i_ctime = inode->i_mtime;
5335 BTRFS_I(inode)->i_otime = inode->i_mtime;
5262 5336
5263 return inode; 5337 return inode;
5264} 5338}
@@ -5826,7 +5900,12 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
5826 5900
5827 inode_init_owner(inode, dir, mode); 5901 inode_init_owner(inode, dir, mode);
5828 inode_set_bytes(inode, 0); 5902 inode_set_bytes(inode, 0);
5829 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 5903
5904 inode->i_mtime = CURRENT_TIME;
5905 inode->i_atime = inode->i_mtime;
5906 inode->i_ctime = inode->i_mtime;
5907 BTRFS_I(inode)->i_otime = inode->i_mtime;
5908
5830 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 5909 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
5831 struct btrfs_inode_item); 5910 struct btrfs_inode_item);
5832 memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item, 5911 memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
@@ -7134,11 +7213,12 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7134 u64 start = iblock << inode->i_blkbits; 7213 u64 start = iblock << inode->i_blkbits;
7135 u64 lockstart, lockend; 7214 u64 lockstart, lockend;
7136 u64 len = bh_result->b_size; 7215 u64 len = bh_result->b_size;
7216 u64 orig_len = len;
7137 int unlock_bits = EXTENT_LOCKED; 7217 int unlock_bits = EXTENT_LOCKED;
7138 int ret = 0; 7218 int ret = 0;
7139 7219
7140 if (create) 7220 if (create)
7141 unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY; 7221 unlock_bits |= EXTENT_DIRTY;
7142 else 7222 else
7143 len = min_t(u64, len, root->sectorsize); 7223 len = min_t(u64, len, root->sectorsize);
7144 7224
@@ -7269,14 +7349,12 @@ unlock:
7269 if (start + len > i_size_read(inode)) 7349 if (start + len > i_size_read(inode))
7270 i_size_write(inode, start + len); 7350 i_size_write(inode, start + len);
7271 7351
7272 spin_lock(&BTRFS_I(inode)->lock); 7352 if (len < orig_len) {
7273 BTRFS_I(inode)->outstanding_extents++; 7353 spin_lock(&BTRFS_I(inode)->lock);
7274 spin_unlock(&BTRFS_I(inode)->lock); 7354 BTRFS_I(inode)->outstanding_extents++;
7275 7355 spin_unlock(&BTRFS_I(inode)->lock);
7276 ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, 7356 }
7277 lockstart + len - 1, EXTENT_DELALLOC, NULL, 7357 btrfs_free_reserved_data_space(inode, len);
7278 &cached_state, GFP_NOFS);
7279 BUG_ON(ret);
7280 } 7358 }
7281 7359
7282 /* 7360 /*
@@ -7805,8 +7883,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
7805 } 7883 }
7806 7884
7807 /* async crcs make it difficult to collect full stripe writes. */ 7885 /* async crcs make it difficult to collect full stripe writes. */
7808 if (btrfs_get_alloc_profile(root, 1) & 7886 if (btrfs_get_alloc_profile(root, 1) & BTRFS_BLOCK_GROUP_RAID56_MASK)
7809 (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6))
7810 async_submit = 0; 7887 async_submit = 0;
7811 else 7888 else
7812 async_submit = 1; 7889 async_submit = 1;
@@ -8053,8 +8130,6 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
8053 else if (ret >= 0 && (size_t)ret < count) 8130 else if (ret >= 0 && (size_t)ret < count)
8054 btrfs_delalloc_release_space(inode, 8131 btrfs_delalloc_release_space(inode,
8055 count - (size_t)ret); 8132 count - (size_t)ret);
8056 else
8057 btrfs_delalloc_release_metadata(inode, 0);
8058 } 8133 }
8059out: 8134out:
8060 if (wakeup) 8135 if (wakeup)
@@ -8575,6 +8650,9 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
8575 8650
8576 ei->delayed_node = NULL; 8651 ei->delayed_node = NULL;
8577 8652
8653 ei->i_otime.tv_sec = 0;
8654 ei->i_otime.tv_nsec = 0;
8655
8578 inode = &ei->vfs_inode; 8656 inode = &ei->vfs_inode;
8579 extent_map_tree_init(&ei->extent_tree); 8657 extent_map_tree_init(&ei->extent_tree);
8580 extent_io_tree_init(&ei->io_tree, &inode->i_data); 8658 extent_io_tree_init(&ei->io_tree, &inode->i_data);
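Both new_simple_dir() and btrfs_new_inode() above replace the chained CURRENT_TIME assignment with explicit copies so that the btrfs-specific creation time (i_otime) can be seeded from the same snapshot, and btrfs_alloc_inode() zeroes the field up front. A minimal sketch of the pattern, with hypothetical names (struct my_times and init_times are not kernel identifiers):

#include <time.h>

/* Hypothetical mirror of the four timestamps the hunks initialize. */
struct my_times {
	struct timespec mtime, atime, ctime, otime;
};

/*
 * Take one snapshot and copy it explicitly; a chained
 * a = b = c = now makes it easy to miss a new field such as
 * otime when one is added later.
 */
static void init_times(struct my_times *t, struct timespec now)
{
	t->mtime = now;
	t->atime = t->mtime;
	t->ctime = t->mtime;
	t->otime = t->mtime;
}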
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index d49fe8a0f6b5..74609b931ba5 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -776,11 +776,11 @@ static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
776 IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode)) 776 IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
777 return -EPERM; 777 return -EPERM;
778 if (isdir) { 778 if (isdir) {
779 if (!S_ISDIR(victim->d_inode->i_mode)) 779 if (!d_is_dir(victim))
780 return -ENOTDIR; 780 return -ENOTDIR;
781 if (IS_ROOT(victim)) 781 if (IS_ROOT(victim))
782 return -EBUSY; 782 return -EBUSY;
783 } else if (S_ISDIR(victim->d_inode->i_mode)) 783 } else if (d_is_dir(victim))
784 return -EISDIR; 784 return -EISDIR;
785 if (IS_DEADDIR(dir)) 785 if (IS_DEADDIR(dir))
786 return -ENOENT; 786 return -ENOENT;
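The ioctl.c hunk above swaps the open-coded S_ISDIR(victim->d_inode->i_mode) tests for the d_is_dir() dentry helper. As a rough stand-alone approximation (the real helper tests type flags cached on the dentry rather than dereferencing d_inode, so treat this as a sketch of the mode-based fallback only):

#include <stdbool.h>
#include <stddef.h>
#include <sys/stat.h>

struct my_inode  { unsigned int i_mode; };
struct my_dentry { struct my_inode *d_inode; };

/*
 * A central helper can also guard against a NULL d_inode, something
 * every open-coded S_ISDIR() caller would otherwise need to remember.
 */
static bool my_d_is_dir(const struct my_dentry *dentry)
{
	return dentry->d_inode != NULL && S_ISDIR(dentry->d_inode->i_mode);
}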
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 48b60dbf807f..97159a8e91d4 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1431,9 +1431,8 @@ static int qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
1431 qgroup = u64_to_ptr(unode->aux); 1431 qgroup = u64_to_ptr(unode->aux);
1432 qgroup->rfer += sign * oper->num_bytes; 1432 qgroup->rfer += sign * oper->num_bytes;
1433 qgroup->rfer_cmpr += sign * oper->num_bytes; 1433 qgroup->rfer_cmpr += sign * oper->num_bytes;
1434 WARN_ON(sign < 0 && qgroup->excl < oper->num_bytes);
1434 qgroup->excl += sign * oper->num_bytes; 1435 qgroup->excl += sign * oper->num_bytes;
1435 if (sign < 0)
1436 WARN_ON(qgroup->excl < oper->num_bytes);
1437 qgroup->excl_cmpr += sign * oper->num_bytes; 1436 qgroup->excl_cmpr += sign * oper->num_bytes;
1438 qgroup_dirty(fs_info, qgroup); 1437 qgroup_dirty(fs_info, qgroup);
1439 1438
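The qgroup.c change hoists the underflow WARN_ON above the update, so it fires while qgroup->excl still holds the pre-update value, and folds the separate sign test into the condition. A small runnable sketch of the same ordering, with a plain counter standing in for the qgroup (apply_excl_delta is a made-up name):

#include <stdint.h>
#include <stdio.h>

/*
 * Warn before mutating: once the (wrapping) addition has happened,
 * the interesting pre-update value is gone.
 */
static void apply_excl_delta(uint64_t *excl, int sign, uint64_t num_bytes)
{
	if (sign < 0 && *excl < num_bytes)
		fprintf(stderr, "warn: excl %llu would underflow by %llu\n",
			(unsigned long long)*excl,
			(unsigned long long)(num_bytes - *excl));
	*excl += sign * num_bytes;	/* u64 arithmetic wraps, as in the kernel */
}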
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 8ab2a17bbba8..5264858ed768 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -58,15 +58,6 @@
58 */ 58 */
59#define RBIO_CACHE_READY_BIT 3 59#define RBIO_CACHE_READY_BIT 3
60 60
61/*
62 * bbio and raid_map are managed by the caller, so we shouldn't free
63 * them here. And besides that, all rbios with this flag should not
64 * be cached, because we need raid_map to check whether the rbios'
65 * stripes are the same, but it is very likely that the caller has
66 * freed the raid_map, so don't cache those rbios.
67 */
68#define RBIO_HOLD_BBIO_MAP_BIT 4
69
70#define RBIO_CACHE_SIZE 1024 61#define RBIO_CACHE_SIZE 1024
71 62
72enum btrfs_rbio_ops { 63enum btrfs_rbio_ops {
@@ -79,13 +70,6 @@ struct btrfs_raid_bio {
79 struct btrfs_fs_info *fs_info; 70 struct btrfs_fs_info *fs_info;
80 struct btrfs_bio *bbio; 71 struct btrfs_bio *bbio;
81 72
82 /*
83 * logical block numbers for the start of each stripe
84 * The last one or two are p/q. These are sorted,
85 * so raid_map[0] is the start of our full stripe
86 */
87 u64 *raid_map;
88
89 /* while we're doing rmw on a stripe 73 /* while we're doing rmw on a stripe
90 * we put it into a hash table so we can 74 * we put it into a hash table so we can
91 * lock the stripe and merge more rbios 75 * lock the stripe and merge more rbios
@@ -303,7 +287,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
303 */ 287 */
304static int rbio_bucket(struct btrfs_raid_bio *rbio) 288static int rbio_bucket(struct btrfs_raid_bio *rbio)
305{ 289{
306 u64 num = rbio->raid_map[0]; 290 u64 num = rbio->bbio->raid_map[0];
307 291
308 /* 292 /*
309 * we shift down quite a bit. We're using byte 293 * we shift down quite a bit. We're using byte
@@ -606,8 +590,8 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
606 test_bit(RBIO_CACHE_BIT, &cur->flags)) 590 test_bit(RBIO_CACHE_BIT, &cur->flags))
607 return 0; 591 return 0;
608 592
609 if (last->raid_map[0] != 593 if (last->bbio->raid_map[0] !=
610 cur->raid_map[0]) 594 cur->bbio->raid_map[0])
611 return 0; 595 return 0;
612 596
613 /* we can't merge with different operations */ 597 /* we can't merge with different operations */
@@ -689,7 +673,7 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
689 spin_lock_irqsave(&h->lock, flags); 673 spin_lock_irqsave(&h->lock, flags);
690 list_for_each_entry(cur, &h->hash_list, hash_list) { 674 list_for_each_entry(cur, &h->hash_list, hash_list) {
691 walk++; 675 walk++;
692 if (cur->raid_map[0] == rbio->raid_map[0]) { 676 if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
693 spin_lock(&cur->bio_list_lock); 677 spin_lock(&cur->bio_list_lock);
694 678
695 /* can we steal this cached rbio's pages? */ 679 /* can we steal this cached rbio's pages? */
@@ -841,21 +825,6 @@ done_nolock:
841 remove_rbio_from_cache(rbio); 825 remove_rbio_from_cache(rbio);
842} 826}
843 827
844static inline void
845__free_bbio_and_raid_map(struct btrfs_bio *bbio, u64 *raid_map, int need)
846{
847 if (need) {
848 kfree(raid_map);
849 kfree(bbio);
850 }
851}
852
853static inline void free_bbio_and_raid_map(struct btrfs_raid_bio *rbio)
854{
855 __free_bbio_and_raid_map(rbio->bbio, rbio->raid_map,
856 !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags));
857}
858
859static void __free_raid_bio(struct btrfs_raid_bio *rbio) 828static void __free_raid_bio(struct btrfs_raid_bio *rbio)
860{ 829{
861 int i; 830 int i;
@@ -875,8 +844,7 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio)
875 } 844 }
876 } 845 }
877 846
878 free_bbio_and_raid_map(rbio); 847 btrfs_put_bbio(rbio->bbio);
879
880 kfree(rbio); 848 kfree(rbio);
881} 849}
882 850
@@ -985,8 +953,7 @@ static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
985 * this does not allocate any pages for rbio->pages. 953 * this does not allocate any pages for rbio->pages.
986 */ 954 */
987static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, 955static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
988 struct btrfs_bio *bbio, u64 *raid_map, 956 struct btrfs_bio *bbio, u64 stripe_len)
989 u64 stripe_len)
990{ 957{
991 struct btrfs_raid_bio *rbio; 958 struct btrfs_raid_bio *rbio;
992 int nr_data = 0; 959 int nr_data = 0;
@@ -1007,7 +974,6 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
1007 INIT_LIST_HEAD(&rbio->stripe_cache); 974 INIT_LIST_HEAD(&rbio->stripe_cache);
1008 INIT_LIST_HEAD(&rbio->hash_list); 975 INIT_LIST_HEAD(&rbio->hash_list);
1009 rbio->bbio = bbio; 976 rbio->bbio = bbio;
1010 rbio->raid_map = raid_map;
1011 rbio->fs_info = root->fs_info; 977 rbio->fs_info = root->fs_info;
1012 rbio->stripe_len = stripe_len; 978 rbio->stripe_len = stripe_len;
1013 rbio->nr_pages = num_pages; 979 rbio->nr_pages = num_pages;
@@ -1028,10 +994,12 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
1028 rbio->bio_pages = p + sizeof(struct page *) * num_pages; 994 rbio->bio_pages = p + sizeof(struct page *) * num_pages;
1029 rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2; 995 rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;
1030 996
1031 if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE) 997 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
998 nr_data = real_stripes - 1;
999 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1032 nr_data = real_stripes - 2; 1000 nr_data = real_stripes - 2;
1033 else 1001 else
1034 nr_data = real_stripes - 1; 1002 BUG();
1035 1003
1036 rbio->nr_data = nr_data; 1004 rbio->nr_data = nr_data;
1037 return rbio; 1005 return rbio;
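alloc_rbio() now classifies the stripe layout by the block-group type flags rather than by probing raid_map for the RAID6_Q_STRIPE sentinel at the end. A hedged restatement of that branch; the flag values below are illustrative, not the kernel's actual BTRFS_BLOCK_GROUP_* constants:

/* Illustrative bit positions; the kernel defines the real ones. */
#define MY_BG_RAID5	(1ULL << 7)
#define MY_BG_RAID6	(1ULL << 8)

/* RAID5 reserves one parity stripe (P); RAID6 reserves two (P, Q). */
static int nr_data_stripes(unsigned long long map_type, int real_stripes)
{
	if (map_type & MY_BG_RAID5)
		return real_stripes - 1;
	if (map_type & MY_BG_RAID6)
		return real_stripes - 2;
	return -1;	/* the diff treats anything else as BUG() */
}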
@@ -1182,7 +1150,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1182 spin_lock_irq(&rbio->bio_list_lock); 1150 spin_lock_irq(&rbio->bio_list_lock);
1183 bio_list_for_each(bio, &rbio->bio_list) { 1151 bio_list_for_each(bio, &rbio->bio_list) {
1184 start = (u64)bio->bi_iter.bi_sector << 9; 1152 start = (u64)bio->bi_iter.bi_sector << 9;
1185 stripe_offset = start - rbio->raid_map[0]; 1153 stripe_offset = start - rbio->bbio->raid_map[0];
1186 page_index = stripe_offset >> PAGE_CACHE_SHIFT; 1154 page_index = stripe_offset >> PAGE_CACHE_SHIFT;
1187 1155
1188 for (i = 0; i < bio->bi_vcnt; i++) { 1156 for (i = 0; i < bio->bi_vcnt; i++) {
@@ -1402,7 +1370,7 @@ static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1402 logical <<= 9; 1370 logical <<= 9;
1403 1371
1404 for (i = 0; i < rbio->nr_data; i++) { 1372 for (i = 0; i < rbio->nr_data; i++) {
1405 stripe_start = rbio->raid_map[i]; 1373 stripe_start = rbio->bbio->raid_map[i];
1406 if (logical >= stripe_start && 1374 if (logical >= stripe_start &&
1407 logical < stripe_start + rbio->stripe_len) { 1375 logical < stripe_start + rbio->stripe_len) {
1408 return i; 1376 return i;
@@ -1776,17 +1744,16 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1776 * our main entry point for writes from the rest of the FS. 1744 * our main entry point for writes from the rest of the FS.
1777 */ 1745 */
1778int raid56_parity_write(struct btrfs_root *root, struct bio *bio, 1746int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
1779 struct btrfs_bio *bbio, u64 *raid_map, 1747 struct btrfs_bio *bbio, u64 stripe_len)
1780 u64 stripe_len)
1781{ 1748{
1782 struct btrfs_raid_bio *rbio; 1749 struct btrfs_raid_bio *rbio;
1783 struct btrfs_plug_cb *plug = NULL; 1750 struct btrfs_plug_cb *plug = NULL;
1784 struct blk_plug_cb *cb; 1751 struct blk_plug_cb *cb;
1785 int ret; 1752 int ret;
1786 1753
1787 rbio = alloc_rbio(root, bbio, raid_map, stripe_len); 1754 rbio = alloc_rbio(root, bbio, stripe_len);
1788 if (IS_ERR(rbio)) { 1755 if (IS_ERR(rbio)) {
1789 __free_bbio_and_raid_map(bbio, raid_map, 1); 1756 btrfs_put_bbio(bbio);
1790 return PTR_ERR(rbio); 1757 return PTR_ERR(rbio);
1791 } 1758 }
1792 bio_list_add(&rbio->bio_list, bio); 1759 bio_list_add(&rbio->bio_list, bio);
@@ -1885,9 +1852,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1885 } 1852 }
1886 1853
1887 /* all raid6 handling here */ 1854 /* all raid6 handling here */
1888 if (rbio->raid_map[rbio->real_stripes - 1] == 1855 if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1889 RAID6_Q_STRIPE) {
1890
1891 /* 1856 /*
1892 * single failure, rebuild from parity raid5 1857 * single failure, rebuild from parity raid5
1893 * style 1858 * style
@@ -1922,8 +1887,9 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1922 * here due to a crc mismatch and we can't give them the 1887 * here due to a crc mismatch and we can't give them the
1923 * data they want 1888 * data they want
1924 */ 1889 */
1925 if (rbio->raid_map[failb] == RAID6_Q_STRIPE) { 1890 if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1926 if (rbio->raid_map[faila] == RAID5_P_STRIPE) { 1891 if (rbio->bbio->raid_map[faila] ==
1892 RAID5_P_STRIPE) {
1927 err = -EIO; 1893 err = -EIO;
1928 goto cleanup; 1894 goto cleanup;
1929 } 1895 }
@@ -1934,7 +1900,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1934 goto pstripe; 1900 goto pstripe;
1935 } 1901 }
1936 1902
1937 if (rbio->raid_map[failb] == RAID5_P_STRIPE) { 1903 if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1938 raid6_datap_recov(rbio->real_stripes, 1904 raid6_datap_recov(rbio->real_stripes,
1939 PAGE_SIZE, faila, pointers); 1905 PAGE_SIZE, faila, pointers);
1940 } else { 1906 } else {
@@ -2001,8 +1967,7 @@ cleanup:
2001 1967
2002cleanup_io: 1968cleanup_io:
2003 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { 1969 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
2004 if (err == 0 && 1970 if (err == 0)
2005 !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags))
2006 cache_rbio_pages(rbio); 1971 cache_rbio_pages(rbio);
2007 else 1972 else
2008 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 1973 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
@@ -2156,15 +2121,16 @@ cleanup:
2156 * of the drive. 2121 * of the drive.
2157 */ 2122 */
2158int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, 2123int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
2159 struct btrfs_bio *bbio, u64 *raid_map, 2124 struct btrfs_bio *bbio, u64 stripe_len,
2160 u64 stripe_len, int mirror_num, int generic_io) 2125 int mirror_num, int generic_io)
2161{ 2126{
2162 struct btrfs_raid_bio *rbio; 2127 struct btrfs_raid_bio *rbio;
2163 int ret; 2128 int ret;
2164 2129
2165 rbio = alloc_rbio(root, bbio, raid_map, stripe_len); 2130 rbio = alloc_rbio(root, bbio, stripe_len);
2166 if (IS_ERR(rbio)) { 2131 if (IS_ERR(rbio)) {
2167 __free_bbio_and_raid_map(bbio, raid_map, generic_io); 2132 if (generic_io)
2133 btrfs_put_bbio(bbio);
2168 return PTR_ERR(rbio); 2134 return PTR_ERR(rbio);
2169 } 2135 }
2170 2136
@@ -2175,7 +2141,8 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
2175 rbio->faila = find_logical_bio_stripe(rbio, bio); 2141 rbio->faila = find_logical_bio_stripe(rbio, bio);
2176 if (rbio->faila == -1) { 2142 if (rbio->faila == -1) {
2177 BUG(); 2143 BUG();
2178 __free_bbio_and_raid_map(bbio, raid_map, generic_io); 2144 if (generic_io)
2145 btrfs_put_bbio(bbio);
2179 kfree(rbio); 2146 kfree(rbio);
2180 return -EIO; 2147 return -EIO;
2181 } 2148 }
@@ -2184,7 +2151,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
2184 btrfs_bio_counter_inc_noblocked(root->fs_info); 2151 btrfs_bio_counter_inc_noblocked(root->fs_info);
2185 rbio->generic_bio_cnt = 1; 2152 rbio->generic_bio_cnt = 1;
2186 } else { 2153 } else {
2187 set_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags); 2154 btrfs_get_bbio(bbio);
2188 } 2155 }
2189 2156
2190 /* 2157 /*
@@ -2240,14 +2207,14 @@ static void read_rebuild_work(struct btrfs_work *work)
2240 2207
2241struct btrfs_raid_bio * 2208struct btrfs_raid_bio *
2242raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio, 2209raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
2243 struct btrfs_bio *bbio, u64 *raid_map, 2210 struct btrfs_bio *bbio, u64 stripe_len,
2244 u64 stripe_len, struct btrfs_device *scrub_dev, 2211 struct btrfs_device *scrub_dev,
2245 unsigned long *dbitmap, int stripe_nsectors) 2212 unsigned long *dbitmap, int stripe_nsectors)
2246{ 2213{
2247 struct btrfs_raid_bio *rbio; 2214 struct btrfs_raid_bio *rbio;
2248 int i; 2215 int i;
2249 2216
2250 rbio = alloc_rbio(root, bbio, raid_map, stripe_len); 2217 rbio = alloc_rbio(root, bbio, stripe_len);
2251 if (IS_ERR(rbio)) 2218 if (IS_ERR(rbio))
2252 return NULL; 2219 return NULL;
2253 bio_list_add(&rbio->bio_list, bio); 2220 bio_list_add(&rbio->bio_list, bio);
@@ -2279,10 +2246,10 @@ void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
2279 int stripe_offset; 2246 int stripe_offset;
2280 int index; 2247 int index;
2281 2248
2282 ASSERT(logical >= rbio->raid_map[0]); 2249 ASSERT(logical >= rbio->bbio->raid_map[0]);
2283 ASSERT(logical + PAGE_SIZE <= rbio->raid_map[0] + 2250 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2284 rbio->stripe_len * rbio->nr_data); 2251 rbio->stripe_len * rbio->nr_data);
2285 stripe_offset = (int)(logical - rbio->raid_map[0]); 2252 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2286 index = stripe_offset >> PAGE_CACHE_SHIFT; 2253 index = stripe_offset >> PAGE_CACHE_SHIFT;
2287 rbio->bio_pages[index] = page; 2254 rbio->bio_pages[index] = page;
2288} 2255}
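Across raid56.c the RBIO_HOLD_BBIO_MAP_BIT ownership flag and the __free_bbio_and_raid_map() helpers disappear in favor of plain reference counting on the btrfs_bio, whose raid_map is now a member rather than a separately allocated array. A self-contained sketch of that get/put lifetime, assuming the map is allocated in the same block as the bbio (the kernel uses atomic_t; this uses C11 atomics, and the struct layout is simplified):

#include <stdatomic.h>
#include <stdlib.h>

/*
 * Simplified: the real btrfs_bio also carries stripes, mirror_num,
 * num_tgtdevs, map_type, and more.
 */
struct my_bbio {
	atomic_int refs;
	int num_stripes;
	unsigned long long raid_map[];	/* lives and dies with the bbio */
};

static struct my_bbio *my_alloc_bbio(int num_stripes)
{
	struct my_bbio *b = calloc(1, sizeof(*b) +
				   num_stripes * sizeof(b->raid_map[0]));
	if (b) {
		atomic_init(&b->refs, 1);
		b->num_stripes = num_stripes;
	}
	return b;
}

static void my_get_bbio(struct my_bbio *b)
{
	atomic_fetch_add(&b->refs, 1);
}

static void my_put_bbio(struct my_bbio *b)
{
	if (b && atomic_fetch_sub(&b->refs, 1) == 1)
		free(b);	/* one free covers the raid_map too */
}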
diff --git a/fs/btrfs/raid56.h b/fs/btrfs/raid56.h
index 31d4a157b5e3..2b5d7977d83b 100644
--- a/fs/btrfs/raid56.h
+++ b/fs/btrfs/raid56.h
@@ -43,16 +43,15 @@ struct btrfs_raid_bio;
43struct btrfs_device; 43struct btrfs_device;
44 44
45int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, 45int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
46 struct btrfs_bio *bbio, u64 *raid_map, 46 struct btrfs_bio *bbio, u64 stripe_len,
47 u64 stripe_len, int mirror_num, int generic_io); 47 int mirror_num, int generic_io);
48int raid56_parity_write(struct btrfs_root *root, struct bio *bio, 48int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
49 struct btrfs_bio *bbio, u64 *raid_map, 49 struct btrfs_bio *bbio, u64 stripe_len);
50 u64 stripe_len);
51 50
52struct btrfs_raid_bio * 51struct btrfs_raid_bio *
53raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio, 52raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
54 struct btrfs_bio *bbio, u64 *raid_map, 53 struct btrfs_bio *bbio, u64 stripe_len,
55 u64 stripe_len, struct btrfs_device *scrub_dev, 54 struct btrfs_device *scrub_dev,
56 unsigned long *dbitmap, int stripe_nsectors); 55 unsigned long *dbitmap, int stripe_nsectors);
57void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio, 56void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
58 struct page *page, u64 logical); 57 struct page *page, u64 logical);
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index b63ae20618fb..0e7beea92b4c 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -66,7 +66,6 @@ struct reada_extctl {
66struct reada_extent { 66struct reada_extent {
67 u64 logical; 67 u64 logical;
68 struct btrfs_key top; 68 struct btrfs_key top;
69 u32 blocksize;
70 int err; 69 int err;
71 struct list_head extctl; 70 struct list_head extctl;
72 int refcnt; 71 int refcnt;
@@ -349,7 +348,6 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
349 348
350 blocksize = root->nodesize; 349 blocksize = root->nodesize;
351 re->logical = logical; 350 re->logical = logical;
352 re->blocksize = blocksize;
353 re->top = *top; 351 re->top = *top;
354 INIT_LIST_HEAD(&re->extctl); 352 INIT_LIST_HEAD(&re->extctl);
355 spin_lock_init(&re->lock); 353 spin_lock_init(&re->lock);
@@ -463,7 +461,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
463 spin_unlock(&fs_info->reada_lock); 461 spin_unlock(&fs_info->reada_lock);
464 btrfs_dev_replace_unlock(&fs_info->dev_replace); 462 btrfs_dev_replace_unlock(&fs_info->dev_replace);
465 463
466 kfree(bbio); 464 btrfs_put_bbio(bbio);
467 return re; 465 return re;
468 466
469error: 467error:
@@ -488,7 +486,7 @@ error:
488 kref_put(&zone->refcnt, reada_zone_release); 486 kref_put(&zone->refcnt, reada_zone_release);
489 spin_unlock(&fs_info->reada_lock); 487 spin_unlock(&fs_info->reada_lock);
490 } 488 }
491 kfree(bbio); 489 btrfs_put_bbio(bbio);
492 kfree(re); 490 kfree(re);
493 return re_exist; 491 return re_exist;
494} 492}
@@ -660,7 +658,6 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
660 int mirror_num = 0; 658 int mirror_num = 0;
661 struct extent_buffer *eb = NULL; 659 struct extent_buffer *eb = NULL;
662 u64 logical; 660 u64 logical;
663 u32 blocksize;
664 int ret; 661 int ret;
665 int i; 662 int i;
666 int need_kick = 0; 663 int need_kick = 0;
@@ -694,7 +691,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
694 spin_unlock(&fs_info->reada_lock); 691 spin_unlock(&fs_info->reada_lock);
695 return 0; 692 return 0;
696 } 693 }
697 dev->reada_next = re->logical + re->blocksize; 694 dev->reada_next = re->logical + fs_info->tree_root->nodesize;
698 re->refcnt++; 695 re->refcnt++;
699 696
700 spin_unlock(&fs_info->reada_lock); 697 spin_unlock(&fs_info->reada_lock);
@@ -709,7 +706,6 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
709 } 706 }
710 } 707 }
711 logical = re->logical; 708 logical = re->logical;
712 blocksize = re->blocksize;
713 709
714 spin_lock(&re->lock); 710 spin_lock(&re->lock);
715 if (re->scheduled_for == NULL) { 711 if (re->scheduled_for == NULL) {
@@ -724,8 +720,8 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
724 return 0; 720 return 0;
725 721
726 atomic_inc(&dev->reada_in_flight); 722 atomic_inc(&dev->reada_in_flight);
727 ret = reada_tree_block_flagged(fs_info->extent_root, logical, blocksize, 723 ret = reada_tree_block_flagged(fs_info->extent_root, logical,
728 mirror_num, &eb); 724 mirror_num, &eb);
729 if (ret) 725 if (ret)
730 __readahead_hook(fs_info->extent_root, NULL, logical, ret); 726 __readahead_hook(fs_info->extent_root, NULL, logical, ret);
731 else if (eb) 727 else if (eb)
@@ -851,7 +847,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
851 break; 847 break;
852 printk(KERN_DEBUG 848 printk(KERN_DEBUG
853 " re: logical %llu size %u empty %d for %lld", 849 " re: logical %llu size %u empty %d for %lld",
854 re->logical, re->blocksize, 850 re->logical, fs_info->tree_root->nodesize,
855 list_empty(&re->extctl), re->scheduled_for ? 851 list_empty(&re->extctl), re->scheduled_for ?
856 re->scheduled_for->devid : -1); 852 re->scheduled_for->devid : -1);
857 853
@@ -886,7 +882,8 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
886 } 882 }
887 printk(KERN_DEBUG 883 printk(KERN_DEBUG
888 "re: logical %llu size %u list empty %d for %lld", 884 "re: logical %llu size %u list empty %d for %lld",
889 re->logical, re->blocksize, list_empty(&re->extctl), 885 re->logical, fs_info->tree_root->nodesize,
886 list_empty(&re->extctl),
890 re->scheduled_for ? re->scheduled_for->devid : -1); 887 re->scheduled_for ? re->scheduled_for->devid : -1);
891 for (i = 0; i < re->nzones; ++i) { 888 for (i = 0; i < re->nzones; ++i) {
892 printk(KERN_CONT " zone %llu-%llu devs", 889 printk(KERN_CONT " zone %llu-%llu devs",
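reada.c can delete the cached per-extent blocksize because every tree block shares the filesystem-wide nodesize, so the value is always recoverable from fs_info. A trivial sketch of the advance step in reada_start_machine_dev() under hypothetical types:

struct my_fs_info      { unsigned int nodesize; };
struct my_reada_extent { unsigned long long logical; };

/* One global nodesize replaces the redundant re->blocksize copy. */
static unsigned long long next_reada_pos(const struct my_fs_info *fs,
					 const struct my_reada_extent *re)
{
	return re->logical + fs->nodesize;
}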
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 74257d6436ad..d83085381bcc 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2855,9 +2855,10 @@ static void update_processed_blocks(struct reloc_control *rc,
2855 } 2855 }
2856} 2856}
2857 2857
2858static int tree_block_processed(u64 bytenr, u32 blocksize, 2858static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
2859 struct reloc_control *rc)
2860{ 2859{
2860 u32 blocksize = rc->extent_root->nodesize;
2861
2861 if (test_range_bit(&rc->processed_blocks, bytenr, 2862 if (test_range_bit(&rc->processed_blocks, bytenr,
2862 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL)) 2863 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
2863 return 1; 2864 return 1;
@@ -2965,8 +2966,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
2965 while (rb_node) { 2966 while (rb_node) {
2966 block = rb_entry(rb_node, struct tree_block, rb_node); 2967 block = rb_entry(rb_node, struct tree_block, rb_node);
2967 if (!block->key_ready) 2968 if (!block->key_ready)
2968 readahead_tree_block(rc->extent_root, block->bytenr, 2969 readahead_tree_block(rc->extent_root, block->bytenr);
2969 block->key.objectid);
2970 rb_node = rb_next(rb_node); 2970 rb_node = rb_next(rb_node);
2971 } 2971 }
2972 2972
@@ -3353,7 +3353,7 @@ static int __add_tree_block(struct reloc_control *rc,
3353 bool skinny = btrfs_fs_incompat(rc->extent_root->fs_info, 3353 bool skinny = btrfs_fs_incompat(rc->extent_root->fs_info,
3354 SKINNY_METADATA); 3354 SKINNY_METADATA);
3355 3355
3356 if (tree_block_processed(bytenr, blocksize, rc)) 3356 if (tree_block_processed(bytenr, rc))
3357 return 0; 3357 return 0;
3358 3358
3359 if (tree_search(blocks, bytenr)) 3359 if (tree_search(blocks, bytenr))
@@ -3611,7 +3611,7 @@ static int find_data_references(struct reloc_control *rc,
3611 if (added) 3611 if (added)
3612 goto next; 3612 goto next;
3613 3613
3614 if (!tree_block_processed(leaf->start, leaf->len, rc)) { 3614 if (!tree_block_processed(leaf->start, rc)) {
3615 block = kmalloc(sizeof(*block), GFP_NOFS); 3615 block = kmalloc(sizeof(*block), GFP_NOFS);
3616 if (!block) { 3616 if (!block) {
3617 err = -ENOMEM; 3617 err = -ENOMEM;
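relocation.c applies the same simplification: tree_block_processed() now takes only the bytenr and the reloc control, deriving blocksize from the extent root's nodesize internally. A sketch of the resulting call shape (types are stand-ins; test_range_dirty abstracts the test_range_bit(..., EXTENT_DIRTY, ...) query in the diff):

#include <stdbool.h>

struct my_root { unsigned int nodesize; };
struct my_reloc_control {
	struct my_root *extent_root;
	/* ... processed_blocks extent-io tree ... */
};

/* Placeholder for the extent-io tree range query in the diff. */
static bool test_range_dirty(const struct my_reloc_control *rc,
			     unsigned long long start,
			     unsigned long long end)
{
	(void)rc; (void)start; (void)end;
	return false;
}

static bool tree_block_processed(unsigned long long bytenr,
				 const struct my_reloc_control *rc)
{
	unsigned int blocksize = rc->extent_root->nodesize;

	return test_range_dirty(rc, bytenr, bytenr + blocksize - 1);
}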
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index e427cb7ee12c..ec57687c9a4d 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -66,7 +66,6 @@ struct scrub_ctx;
66struct scrub_recover { 66struct scrub_recover {
67 atomic_t refs; 67 atomic_t refs;
68 struct btrfs_bio *bbio; 68 struct btrfs_bio *bbio;
69 u64 *raid_map;
70 u64 map_length; 69 u64 map_length;
71}; 70};
72 71
@@ -80,7 +79,7 @@ struct scrub_page {
80 u64 logical; 79 u64 logical;
81 u64 physical; 80 u64 physical;
82 u64 physical_for_dev_replace; 81 u64 physical_for_dev_replace;
83 atomic_t ref_count; 82 atomic_t refs;
84 struct { 83 struct {
85 unsigned int mirror_num:8; 84 unsigned int mirror_num:8;
86 unsigned int have_csum:1; 85 unsigned int have_csum:1;
@@ -113,7 +112,7 @@ struct scrub_block {
113 struct scrub_page *pagev[SCRUB_MAX_PAGES_PER_BLOCK]; 112 struct scrub_page *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
114 int page_count; 113 int page_count;
115 atomic_t outstanding_pages; 114 atomic_t outstanding_pages;
116 atomic_t ref_count; /* free mem on transition to zero */ 115 atomic_t refs; /* free mem on transition to zero */
117 struct scrub_ctx *sctx; 116 struct scrub_ctx *sctx;
118 struct scrub_parity *sparity; 117 struct scrub_parity *sparity;
119 struct { 118 struct {
@@ -142,7 +141,7 @@ struct scrub_parity {
142 141
143 int stripe_len; 142 int stripe_len;
144 143
145 atomic_t ref_count; 144 atomic_t refs;
146 145
147 struct list_head spages; 146 struct list_head spages;
148 147
@@ -194,6 +193,15 @@ struct scrub_ctx {
194 */ 193 */
195 struct btrfs_scrub_progress stat; 194 struct btrfs_scrub_progress stat;
196 spinlock_t stat_lock; 195 spinlock_t stat_lock;
196
197 /*
198 * Use a ref counter to avoid use-after-free issues. Scrub workers
199 * decrement bios_in_flight and workers_pending and then do a wakeup
200 * on the list_wait wait queue. We must ensure the main scrub task
201 * doesn't free the scrub context before or while the workers are
202 * doing the wakeup() call.
203 */
204 atomic_t refs;
197}; 205};
198 206
199struct scrub_fixup_nodatasum { 207struct scrub_fixup_nodatasum {
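The new scrub_ctx refs field pairs every in-flight bio (and every pending worker) with a context reference; the dec paths wake the waiter first and drop the reference last, which is exactly what keeps the wakeup from touching freed memory. A compact sketch of that ordering with C11 atomics (my_ctx and the function names are stand-ins):

#include <stdatomic.h>
#include <stdlib.h>

struct my_ctx {
	atomic_int refs;		/* set to 1 at setup time */
	atomic_int bios_in_flight;
	/* ... wait queue, stats, ... */
};

static void ctx_put(struct my_ctx *c)
{
	if (atomic_fetch_sub(&c->refs, 1) == 1)
		free(c);	/* mirrors scrub_put_ctx() -> scrub_free_ctx() */
}

static void pending_bio_inc(struct my_ctx *c)
{
	atomic_fetch_add(&c->refs, 1);	/* the bio now pins the ctx */
	atomic_fetch_add(&c->bios_in_flight, 1);
}

static void pending_bio_dec(struct my_ctx *c)
{
	atomic_fetch_sub(&c->bios_in_flight, 1);
	/* wake_up(&c->list_wait) happens here in the kernel ... */
	ctx_put(c);	/* ... and only then is the reference dropped */
}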
@@ -236,10 +244,7 @@ static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
236static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx); 244static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
237static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx); 245static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
238static int scrub_handle_errored_block(struct scrub_block *sblock_to_check); 246static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
239static int scrub_setup_recheck_block(struct scrub_ctx *sctx, 247static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
240 struct btrfs_fs_info *fs_info,
241 struct scrub_block *original_sblock,
242 u64 length, u64 logical,
243 struct scrub_block *sblocks_for_recheck); 248 struct scrub_block *sblocks_for_recheck);
244static void scrub_recheck_block(struct btrfs_fs_info *fs_info, 249static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
245 struct scrub_block *sblock, int is_metadata, 250 struct scrub_block *sblock, int is_metadata,
@@ -251,8 +256,7 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
251 const u8 *csum, u64 generation, 256 const u8 *csum, u64 generation,
252 u16 csum_size); 257 u16 csum_size);
253static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, 258static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
254 struct scrub_block *sblock_good, 259 struct scrub_block *sblock_good);
255 int force_write);
256static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, 260static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
257 struct scrub_block *sblock_good, 261 struct scrub_block *sblock_good,
258 int page_num, int force_write); 262 int page_num, int force_write);
@@ -302,10 +306,12 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
302static void copy_nocow_pages_worker(struct btrfs_work *work); 306static void copy_nocow_pages_worker(struct btrfs_work *work);
303static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info); 307static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
304static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info); 308static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
309static void scrub_put_ctx(struct scrub_ctx *sctx);
305 310
306 311
307static void scrub_pending_bio_inc(struct scrub_ctx *sctx) 312static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
308{ 313{
314 atomic_inc(&sctx->refs);
309 atomic_inc(&sctx->bios_in_flight); 315 atomic_inc(&sctx->bios_in_flight);
310} 316}
311 317
@@ -313,6 +319,7 @@ static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
313{ 319{
314 atomic_dec(&sctx->bios_in_flight); 320 atomic_dec(&sctx->bios_in_flight);
315 wake_up(&sctx->list_wait); 321 wake_up(&sctx->list_wait);
322 scrub_put_ctx(sctx);
316} 323}
317 324
318static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) 325static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
@@ -346,6 +353,7 @@ static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
346{ 353{
347 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; 354 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
348 355
356 atomic_inc(&sctx->refs);
349 /* 357 /*
350 * increment scrubs_running to prevent cancel requests from 358 * increment scrubs_running to prevent cancel requests from
351 * completing as long as a worker is running. we must also 359 * completing as long as a worker is running. we must also
@@ -388,6 +396,7 @@ static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
388 atomic_dec(&sctx->workers_pending); 396 atomic_dec(&sctx->workers_pending);
389 wake_up(&fs_info->scrub_pause_wait); 397 wake_up(&fs_info->scrub_pause_wait);
390 wake_up(&sctx->list_wait); 398 wake_up(&sctx->list_wait);
399 scrub_put_ctx(sctx);
391} 400}
392 401
393static void scrub_free_csums(struct scrub_ctx *sctx) 402static void scrub_free_csums(struct scrub_ctx *sctx)
@@ -433,6 +442,12 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
433 kfree(sctx); 442 kfree(sctx);
434} 443}
435 444
445static void scrub_put_ctx(struct scrub_ctx *sctx)
446{
447 if (atomic_dec_and_test(&sctx->refs))
448 scrub_free_ctx(sctx);
449}
450
436static noinline_for_stack 451static noinline_for_stack
437struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace) 452struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
438{ 453{
@@ -457,6 +472,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
457 sctx = kzalloc(sizeof(*sctx), GFP_NOFS); 472 sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
458 if (!sctx) 473 if (!sctx)
459 goto nomem; 474 goto nomem;
475 atomic_set(&sctx->refs, 1);
460 sctx->is_dev_replace = is_dev_replace; 476 sctx->is_dev_replace = is_dev_replace;
461 sctx->pages_per_rd_bio = pages_per_rd_bio; 477 sctx->pages_per_rd_bio = pages_per_rd_bio;
462 sctx->curr = -1; 478 sctx->curr = -1;
@@ -520,6 +536,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
520 struct inode_fs_paths *ipath = NULL; 536 struct inode_fs_paths *ipath = NULL;
521 struct btrfs_root *local_root; 537 struct btrfs_root *local_root;
522 struct btrfs_key root_key; 538 struct btrfs_key root_key;
539 struct btrfs_key key;
523 540
524 root_key.objectid = root; 541 root_key.objectid = root;
525 root_key.type = BTRFS_ROOT_ITEM_KEY; 542 root_key.type = BTRFS_ROOT_ITEM_KEY;
@@ -530,7 +547,14 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
530 goto err; 547 goto err;
531 } 548 }
532 549
533 ret = inode_item_info(inum, 0, local_root, swarn->path); 550 /*
551 * this makes the path point to (inum INODE_ITEM ioff)
552 */
553 key.objectid = inum;
554 key.type = BTRFS_INODE_ITEM_KEY;
555 key.offset = 0;
556
557 ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
534 if (ret) { 558 if (ret) {
535 btrfs_release_path(swarn->path); 559 btrfs_release_path(swarn->path);
536 goto err; 560 goto err;
@@ -848,8 +872,7 @@ static inline void scrub_get_recover(struct scrub_recover *recover)
848static inline void scrub_put_recover(struct scrub_recover *recover) 872static inline void scrub_put_recover(struct scrub_recover *recover)
849{ 873{
850 if (atomic_dec_and_test(&recover->refs)) { 874 if (atomic_dec_and_test(&recover->refs)) {
851 kfree(recover->bbio); 875 btrfs_put_bbio(recover->bbio);
852 kfree(recover->raid_map);
853 kfree(recover); 876 kfree(recover);
854 } 877 }
855} 878}
@@ -955,8 +978,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
955 } 978 }
956 979
957 /* setup the context, map the logical blocks and alloc the pages */ 980 /* setup the context, map the logical blocks and alloc the pages */
958 ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length, 981 ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
959 logical, sblocks_for_recheck);
960 if (ret) { 982 if (ret) {
961 spin_lock(&sctx->stat_lock); 983 spin_lock(&sctx->stat_lock);
962 sctx->stat.read_errors++; 984 sctx->stat.read_errors++;
@@ -1030,9 +1052,10 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
1030 if (!is_metadata && !have_csum) { 1052 if (!is_metadata && !have_csum) {
1031 struct scrub_fixup_nodatasum *fixup_nodatasum; 1053 struct scrub_fixup_nodatasum *fixup_nodatasum;
1032 1054
1033nodatasum_case:
1034 WARN_ON(sctx->is_dev_replace); 1055 WARN_ON(sctx->is_dev_replace);
1035 1056
1057nodatasum_case:
1058
1036 /* 1059 /*
1037 * !is_metadata and !have_csum, this means that the data 1060 * !is_metadata and !have_csum, this means that the data
1038 * might not be COW'ed, that it might be modified 1061 * might not be COW'ed, that it might be modified
@@ -1091,76 +1114,20 @@ nodatasum_case:
1091 sblock_other->no_io_error_seen) { 1114 sblock_other->no_io_error_seen) {
1092 if (sctx->is_dev_replace) { 1115 if (sctx->is_dev_replace) {
1093 scrub_write_block_to_dev_replace(sblock_other); 1116 scrub_write_block_to_dev_replace(sblock_other);
1117 goto corrected_error;
1094 } else { 1118 } else {
1095 int force_write = is_metadata || have_csum;
1096
1097 ret = scrub_repair_block_from_good_copy( 1119 ret = scrub_repair_block_from_good_copy(
1098 sblock_bad, sblock_other, 1120 sblock_bad, sblock_other);
1099 force_write); 1121 if (!ret)
1122 goto corrected_error;
1100 } 1123 }
1101 if (0 == ret)
1102 goto corrected_error;
1103 } 1124 }
1104 } 1125 }
1105 1126
1106 /* 1127 if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
1107 * for dev_replace, pick good pages and write to the target device. 1128 goto did_not_correct_error;
1108 */
1109 if (sctx->is_dev_replace) {
1110 success = 1;
1111 for (page_num = 0; page_num < sblock_bad->page_count;
1112 page_num++) {
1113 int sub_success;
1114
1115 sub_success = 0;
1116 for (mirror_index = 0;
1117 mirror_index < BTRFS_MAX_MIRRORS &&
1118 sblocks_for_recheck[mirror_index].page_count > 0;
1119 mirror_index++) {
1120 struct scrub_block *sblock_other =
1121 sblocks_for_recheck + mirror_index;
1122 struct scrub_page *page_other =
1123 sblock_other->pagev[page_num];
1124
1125 if (!page_other->io_error) {
1126 ret = scrub_write_page_to_dev_replace(
1127 sblock_other, page_num);
1128 if (ret == 0) {
1129 /* succeeded for this page */
1130 sub_success = 1;
1131 break;
1132 } else {
1133 btrfs_dev_replace_stats_inc(
1134 &sctx->dev_root->
1135 fs_info->dev_replace.
1136 num_write_errors);
1137 }
1138 }
1139 }
1140
1141 if (!sub_success) {
1142 /*
1143 * did not find a mirror to fetch the page
1144 * from. scrub_write_page_to_dev_replace()
1145 * handles this case (page->io_error) by
1146 * filling the block with zeros before
1147 * submitting the write request
1148 */
1149 success = 0;
1150 ret = scrub_write_page_to_dev_replace(
1151 sblock_bad, page_num);
1152 if (ret)
1153 btrfs_dev_replace_stats_inc(
1154 &sctx->dev_root->fs_info->
1155 dev_replace.num_write_errors);
1156 }
1157 }
1158
1159 goto out;
1160 }
1161 1129
1162 /* 1130 /*
1163 * for regular scrub, repair those pages that are errored.
1164 * In case of I/O errors in the area that is supposed to be 1131 * In case of I/O errors in the area that is supposed to be
1165 * repaired, continue by picking good copies of those pages. 1132 * repaired, continue by picking good copies of those pages.
1166 * Select the good pages from mirrors to rewrite bad pages from 1133 * Select the good pages from mirrors to rewrite bad pages from
@@ -1184,44 +1151,64 @@ nodatasum_case:
1184 * mirror, even if other 512 byte sectors in the same PAGE_SIZE 1151 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
1185 * area are unreadable. 1152 * area are unreadable.
1186 */ 1153 */
1187
1188 /* can only fix I/O errors from here on */
1189 if (sblock_bad->no_io_error_seen)
1190 goto did_not_correct_error;
1191
1192 success = 1; 1154 success = 1;
1193 for (page_num = 0; page_num < sblock_bad->page_count; page_num++) { 1155 for (page_num = 0; page_num < sblock_bad->page_count;
1156 page_num++) {
1194 struct scrub_page *page_bad = sblock_bad->pagev[page_num]; 1157 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1158 struct scrub_block *sblock_other = NULL;
1195 1159
1196 if (!page_bad->io_error) 1160 /* skip no-io-error page in scrub */
1161 if (!page_bad->io_error && !sctx->is_dev_replace)
1197 continue; 1162 continue;
1198 1163
1199 for (mirror_index = 0; 1164 /* try to find no-io-error page in mirrors */
1200 mirror_index < BTRFS_MAX_MIRRORS && 1165 if (page_bad->io_error) {
1201 sblocks_for_recheck[mirror_index].page_count > 0; 1166 for (mirror_index = 0;
1202 mirror_index++) { 1167 mirror_index < BTRFS_MAX_MIRRORS &&
1203 struct scrub_block *sblock_other = sblocks_for_recheck + 1168 sblocks_for_recheck[mirror_index].page_count > 0;
1204 mirror_index; 1169 mirror_index++) {
1205 struct scrub_page *page_other = sblock_other->pagev[ 1170 if (!sblocks_for_recheck[mirror_index].
1206 page_num]; 1171 pagev[page_num]->io_error) {
1207 1172 sblock_other = sblocks_for_recheck +
1208 if (!page_other->io_error) { 1173 mirror_index;
1209 ret = scrub_repair_page_from_good_copy( 1174 break;
1210 sblock_bad, sblock_other, page_num, 0);
1211 if (0 == ret) {
1212 page_bad->io_error = 0;
1213 break; /* succeeded for this page */
1214 } 1175 }
1215 } 1176 }
1177 if (!sblock_other)
1178 success = 0;
1216 } 1179 }
1217 1180
1218 if (page_bad->io_error) { 1181 if (sctx->is_dev_replace) {
1219 /* did not find a mirror to copy the page from */ 1182 /*
1220 success = 0; 1183 * did not find a mirror to fetch the page
1184 * from. scrub_write_page_to_dev_replace()
1185 * handles this case (page->io_error) by
1186 * filling the block with zeros before
1187 * submitting the write request
1188 */
1189 if (!sblock_other)
1190 sblock_other = sblock_bad;
1191
1192 if (scrub_write_page_to_dev_replace(sblock_other,
1193 page_num) != 0) {
1194 btrfs_dev_replace_stats_inc(
1195 &sctx->dev_root->
1196 fs_info->dev_replace.
1197 num_write_errors);
1198 success = 0;
1199 }
1200 } else if (sblock_other) {
1201 ret = scrub_repair_page_from_good_copy(sblock_bad,
1202 sblock_other,
1203 page_num, 0);
1204 if (0 == ret)
1205 page_bad->io_error = 0;
1206 else
1207 success = 0;
1221 } 1208 }
1222 } 1209 }
1223 1210
1224 if (success) { 1211 if (success && !sctx->is_dev_replace) {
1225 if (is_metadata || have_csum) { 1212 if (is_metadata || have_csum) {
1226 /* 1213 /*
1227 * need to verify the checksum now that all 1214 * need to verify the checksum now that all
@@ -1288,19 +1275,18 @@ out:
1288 return 0; 1275 return 0;
1289} 1276}
1290 1277
1291static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio, u64 *raid_map) 1278static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
1292{ 1279{
1293 if (raid_map) { 1280 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1294 if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE) 1281 return 2;
1295 return 3; 1282 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1296 else 1283 return 3;
1297 return 2; 1284 else
1298 } else {
1299 return (int)bbio->num_stripes; 1285 return (int)bbio->num_stripes;
1300 }
1301} 1286}
1302 1287
1303static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map, 1288static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1289 u64 *raid_map,
1304 u64 mapped_length, 1290 u64 mapped_length,
1305 int nstripes, int mirror, 1291 int nstripes, int mirror,
1306 int *stripe_index, 1292 int *stripe_index,
@@ -1308,7 +1294,7 @@ static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map,
1308{ 1294{
1309 int i; 1295 int i;
1310 1296
1311 if (raid_map) { 1297 if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1312 /* RAID5/6 */ 1298 /* RAID5/6 */
1313 for (i = 0; i < nstripes; i++) { 1299 for (i = 0; i < nstripes; i++) {
1314 if (raid_map[i] == RAID6_Q_STRIPE || 1300 if (raid_map[i] == RAID6_Q_STRIPE ||
@@ -1329,72 +1315,65 @@ static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map,
1329 } 1315 }
1330} 1316}
1331 1317
1332static int scrub_setup_recheck_block(struct scrub_ctx *sctx, 1318static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
1333 struct btrfs_fs_info *fs_info,
1334 struct scrub_block *original_sblock,
1335 u64 length, u64 logical,
1336 struct scrub_block *sblocks_for_recheck) 1319 struct scrub_block *sblocks_for_recheck)
1337{ 1320{
1321 struct scrub_ctx *sctx = original_sblock->sctx;
1322 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
1323 u64 length = original_sblock->page_count * PAGE_SIZE;
1324 u64 logical = original_sblock->pagev[0]->logical;
1338 struct scrub_recover *recover; 1325 struct scrub_recover *recover;
1339 struct btrfs_bio *bbio; 1326 struct btrfs_bio *bbio;
1340 u64 *raid_map;
1341 u64 sublen; 1327 u64 sublen;
1342 u64 mapped_length; 1328 u64 mapped_length;
1343 u64 stripe_offset; 1329 u64 stripe_offset;
1344 int stripe_index; 1330 int stripe_index;
1345 int page_index; 1331 int page_index = 0;
1346 int mirror_index; 1332 int mirror_index;
1347 int nmirrors; 1333 int nmirrors;
1348 int ret; 1334 int ret;
1349 1335
1350 /* 1336 /*
1351 * note: the two members ref_count and outstanding_pages 1337 * note: the two members refs and outstanding_pages
1352 * are not used (and not set) in the blocks that are used for 1338 * are not used (and not set) in the blocks that are used for
1353 * the recheck procedure 1339 * the recheck procedure
1354 */ 1340 */
1355 1341
1356 page_index = 0;
1357 while (length > 0) { 1342 while (length > 0) {
1358 sublen = min_t(u64, length, PAGE_SIZE); 1343 sublen = min_t(u64, length, PAGE_SIZE);
1359 mapped_length = sublen; 1344 mapped_length = sublen;
1360 bbio = NULL; 1345 bbio = NULL;
1361 raid_map = NULL;
1362 1346
1363 /* 1347 /*
1364 * with a length of PAGE_SIZE, each returned stripe 1348 * with a length of PAGE_SIZE, each returned stripe
1365 * represents one mirror 1349 * represents one mirror
1366 */ 1350 */
1367 ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, 1351 ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
1368 &mapped_length, &bbio, 0, &raid_map); 1352 &mapped_length, &bbio, 0, 1);
1369 if (ret || !bbio || mapped_length < sublen) { 1353 if (ret || !bbio || mapped_length < sublen) {
1370 kfree(bbio); 1354 btrfs_put_bbio(bbio);
1371 kfree(raid_map);
1372 return -EIO; 1355 return -EIO;
1373 } 1356 }
1374 1357
1375 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS); 1358 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1376 if (!recover) { 1359 if (!recover) {
1377 kfree(bbio); 1360 btrfs_put_bbio(bbio);
1378 kfree(raid_map);
1379 return -ENOMEM; 1361 return -ENOMEM;
1380 } 1362 }
1381 1363
1382 atomic_set(&recover->refs, 1); 1364 atomic_set(&recover->refs, 1);
1383 recover->bbio = bbio; 1365 recover->bbio = bbio;
1384 recover->raid_map = raid_map;
1385 recover->map_length = mapped_length; 1366 recover->map_length = mapped_length;
1386 1367
1387 BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO); 1368 BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
1388 1369
1389 nmirrors = scrub_nr_raid_mirrors(bbio, raid_map); 1370 nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
1371
1390 for (mirror_index = 0; mirror_index < nmirrors; 1372 for (mirror_index = 0; mirror_index < nmirrors;
1391 mirror_index++) { 1373 mirror_index++) {
1392 struct scrub_block *sblock; 1374 struct scrub_block *sblock;
1393 struct scrub_page *page; 1375 struct scrub_page *page;
1394 1376
1395 if (mirror_index >= BTRFS_MAX_MIRRORS)
1396 continue;
1397
1398 sblock = sblocks_for_recheck + mirror_index; 1377 sblock = sblocks_for_recheck + mirror_index;
1399 sblock->sctx = sctx; 1378 sblock->sctx = sctx;
1400 page = kzalloc(sizeof(*page), GFP_NOFS); 1379 page = kzalloc(sizeof(*page), GFP_NOFS);
@@ -1410,9 +1389,12 @@ leave_nomem:
1410 sblock->pagev[page_index] = page; 1389 sblock->pagev[page_index] = page;
1411 page->logical = logical; 1390 page->logical = logical;
1412 1391
1413 scrub_stripe_index_and_offset(logical, raid_map, 1392 scrub_stripe_index_and_offset(logical,
1393 bbio->map_type,
1394 bbio->raid_map,
1414 mapped_length, 1395 mapped_length,
1415 bbio->num_stripes, 1396 bbio->num_stripes -
1397 bbio->num_tgtdevs,
1416 mirror_index, 1398 mirror_index,
1417 &stripe_index, 1399 &stripe_index,
1418 &stripe_offset); 1400 &stripe_offset);
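scrub_setup_recheck_block() sheds four parameters: the context, fs_info, length, and logical are all derivable from the original sblock, as the new prologue in the hunk above shows. A minimal illustration of that narrowing (types and the page-size value are assumptions for the sketch):

#define MY_PAGE_SIZE 4096ULL

struct my_page  { unsigned long long logical; };
struct my_block {
	int page_count;
	struct my_page *pagev[16];
};

/*
 * Pass the aggregate, derive the scalars: fewer arguments for
 * callers to get wrong or pass inconsistently.
 */
static void recheck_args(const struct my_block *sb,
			 unsigned long long *length,
			 unsigned long long *logical)
{
	*length  = (unsigned long long)sb->page_count * MY_PAGE_SIZE;
	*logical = sb->pagev[0]->logical;
}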
@@ -1458,7 +1440,8 @@ static void scrub_bio_wait_endio(struct bio *bio, int error)
1458 1440
1459static inline int scrub_is_page_on_raid56(struct scrub_page *page) 1441static inline int scrub_is_page_on_raid56(struct scrub_page *page)
1460{ 1442{
1461 return page->recover && page->recover->raid_map; 1443 return page->recover &&
1444 (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
1462} 1445}
1463 1446
1464static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, 1447static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
@@ -1475,7 +1458,6 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1475 bio->bi_end_io = scrub_bio_wait_endio; 1458 bio->bi_end_io = scrub_bio_wait_endio;
1476 1459
1477 ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio, 1460 ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
1478 page->recover->raid_map,
1479 page->recover->map_length, 1461 page->recover->map_length,
1480 page->mirror_num, 0); 1462 page->mirror_num, 0);
1481 if (ret) 1463 if (ret)
@@ -1615,8 +1597,7 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1615} 1597}
1616 1598
1617static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, 1599static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1618 struct scrub_block *sblock_good, 1600 struct scrub_block *sblock_good)
1619 int force_write)
1620{ 1601{
1621 int page_num; 1602 int page_num;
1622 int ret = 0; 1603 int ret = 0;
@@ -1626,8 +1607,7 @@ static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1626 1607
1627 ret_sub = scrub_repair_page_from_good_copy(sblock_bad, 1608 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1628 sblock_good, 1609 sblock_good,
1629 page_num, 1610 page_num, 1);
1630 force_write);
1631 if (ret_sub) 1611 if (ret_sub)
1632 ret = ret_sub; 1612 ret = ret_sub;
1633 } 1613 }
@@ -2067,12 +2047,12 @@ static int scrub_checksum_super(struct scrub_block *sblock)
2067 2047
2068static void scrub_block_get(struct scrub_block *sblock) 2048static void scrub_block_get(struct scrub_block *sblock)
2069{ 2049{
2070 atomic_inc(&sblock->ref_count); 2050 atomic_inc(&sblock->refs);
2071} 2051}
2072 2052
2073static void scrub_block_put(struct scrub_block *sblock) 2053static void scrub_block_put(struct scrub_block *sblock)
2074{ 2054{
2075 if (atomic_dec_and_test(&sblock->ref_count)) { 2055 if (atomic_dec_and_test(&sblock->refs)) {
2076 int i; 2056 int i;
2077 2057
2078 if (sblock->sparity) 2058 if (sblock->sparity)
@@ -2086,12 +2066,12 @@ static void scrub_block_put(struct scrub_block *sblock)
2086 2066
2087static void scrub_page_get(struct scrub_page *spage) 2067static void scrub_page_get(struct scrub_page *spage)
2088{ 2068{
2089 atomic_inc(&spage->ref_count); 2069 atomic_inc(&spage->refs);
2090} 2070}
2091 2071
2092static void scrub_page_put(struct scrub_page *spage) 2072static void scrub_page_put(struct scrub_page *spage)
2093{ 2073{
2094 if (atomic_dec_and_test(&spage->ref_count)) { 2074 if (atomic_dec_and_test(&spage->refs)) {
2095 if (spage->page) 2075 if (spage->page)
2096 __free_page(spage->page); 2076 __free_page(spage->page);
2097 kfree(spage); 2077 kfree(spage);
@@ -2217,7 +2197,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
2217 2197
2218 /* one ref inside this function, plus one for each page added to 2198 /* one ref inside this function, plus one for each page added to
2219 * a bio later on */ 2199 * a bio later on */
2220 atomic_set(&sblock->ref_count, 1); 2200 atomic_set(&sblock->refs, 1);
2221 sblock->sctx = sctx; 2201 sblock->sctx = sctx;
2222 sblock->no_io_error_seen = 1; 2202 sblock->no_io_error_seen = 1;
2223 2203
@@ -2510,7 +2490,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
2510 2490
2511 /* one ref inside this function, plus one for each page added to 2491 /* one ref inside this function, plus one for each page added to
2512 * a bio later on */ 2492 * a bio later on */
2513 atomic_set(&sblock->ref_count, 1); 2493 atomic_set(&sblock->refs, 1);
2514 sblock->sctx = sctx; 2494 sblock->sctx = sctx;
2515 sblock->no_io_error_seen = 1; 2495 sblock->no_io_error_seen = 1;
2516 sblock->sparity = sparity; 2496 sblock->sparity = sparity;
@@ -2705,7 +2685,6 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2705 struct btrfs_raid_bio *rbio; 2685 struct btrfs_raid_bio *rbio;
2706 struct scrub_page *spage; 2686 struct scrub_page *spage;
2707 struct btrfs_bio *bbio = NULL; 2687 struct btrfs_bio *bbio = NULL;
2708 u64 *raid_map = NULL;
2709 u64 length; 2688 u64 length;
2710 int ret; 2689 int ret;
2711 2690
@@ -2716,8 +2695,8 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2716 length = sparity->logic_end - sparity->logic_start + 1; 2695 length = sparity->logic_end - sparity->logic_start + 1;
2717 ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE, 2696 ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
2718 sparity->logic_start, 2697 sparity->logic_start,
2719 &length, &bbio, 0, &raid_map); 2698 &length, &bbio, 0, 1);
2720 if (ret || !bbio || !raid_map) 2699 if (ret || !bbio || !bbio->raid_map)
2721 goto bbio_out; 2700 goto bbio_out;
2722 2701
2723 bio = btrfs_io_bio_alloc(GFP_NOFS, 0); 2702 bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
@@ -2729,8 +2708,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2729 bio->bi_end_io = scrub_parity_bio_endio; 2708 bio->bi_end_io = scrub_parity_bio_endio;
2730 2709
2731 rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio, 2710 rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
2732 raid_map, length, 2711 length, sparity->scrub_dev,
2733 sparity->scrub_dev,
2734 sparity->dbitmap, 2712 sparity->dbitmap,
2735 sparity->nsectors); 2713 sparity->nsectors);
2736 if (!rbio) 2714 if (!rbio)
@@ -2747,8 +2725,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2747rbio_out: 2725rbio_out:
2748 bio_put(bio); 2726 bio_put(bio);
2749bbio_out: 2727bbio_out:
2750 kfree(bbio); 2728 btrfs_put_bbio(bbio);
2751 kfree(raid_map);
2752 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, 2729 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2753 sparity->nsectors); 2730 sparity->nsectors);
2754 spin_lock(&sctx->stat_lock); 2731 spin_lock(&sctx->stat_lock);
@@ -2765,12 +2742,12 @@ static inline int scrub_calc_parity_bitmap_len(int nsectors)
2765 2742
2766static void scrub_parity_get(struct scrub_parity *sparity) 2743static void scrub_parity_get(struct scrub_parity *sparity)
2767{ 2744{
2768 atomic_inc(&sparity->ref_count); 2745 atomic_inc(&sparity->refs);
2769} 2746}
2770 2747
2771static void scrub_parity_put(struct scrub_parity *sparity) 2748static void scrub_parity_put(struct scrub_parity *sparity)
2772{ 2749{
2773 if (!atomic_dec_and_test(&sparity->ref_count)) 2750 if (!atomic_dec_and_test(&sparity->refs))
2774 return; 2751 return;
2775 2752
2776 scrub_parity_check_and_repair(sparity); 2753 scrub_parity_check_and_repair(sparity);
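scrub_parity_put() above is a last-reference trigger: unlike the scrub_ctx reference earlier, which only keeps the context alive across a wakeup, dropping the final parity reference is what starts the actual check-and-repair, once every contributing page has been processed. A sketch of the idiom (check_and_repair is a placeholder for the real work):

#include <stdatomic.h>
#include <stdlib.h>

struct my_parity { atomic_int refs; /* ... bitmaps, page list ... */ };

static void check_and_repair(struct my_parity *sp) { (void)sp; /* placeholder */ }

static void parity_put(struct my_parity *sp)
{
	if (atomic_fetch_sub(&sp->refs, 1) != 1)
		return;		/* other pages still hold references */
	/* last put: every contributing page is in, verify the stripe */
	check_and_repair(sp);
	free(sp);
}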
@@ -2820,7 +2797,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2820 sparity->scrub_dev = sdev; 2797 sparity->scrub_dev = sdev;
 	sparity->logic_start = logic_start;
 	sparity->logic_end = logic_end;
-	atomic_set(&sparity->ref_count, 1);
+	atomic_set(&sparity->refs, 1);
 	INIT_LIST_HEAD(&sparity->spages);
 	sparity->dbitmap = sparity->bitmap;
 	sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
@@ -3037,8 +3014,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
 		increment = map->stripe_len;
 		mirror_num = num % map->num_stripes + 1;
-	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-				BTRFS_BLOCK_GROUP_RAID6)) {
+	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 		get_raid56_logic_offset(physical, num, map, &offset, NULL);
 		increment = map->stripe_len * nr_data_stripes(map);
 		mirror_num = 1;
@@ -3074,8 +3050,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 	 */
 	logical = base + offset;
 	physical_end = physical + nstripes * map->stripe_len;
-	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-			 BTRFS_BLOCK_GROUP_RAID6)) {
+	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 		get_raid56_logic_offset(physical_end, num,
 					map, &logic_end, NULL);
 		logic_end += base;
@@ -3121,8 +3096,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 	ret = 0;
 	while (physical < physical_end) {
 		/* for raid56, we skip parity stripe */
-		if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-				 BTRFS_BLOCK_GROUP_RAID6)) {
+		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 			ret = get_raid56_logic_offset(physical, num,
 					map, &logical, &stripe_logical);
 			logical += base;
@@ -3280,8 +3254,7 @@ again:
 			scrub_free_csums(sctx);
 			if (extent_logical + extent_len <
 			    key.objectid + bytes) {
-				if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-					BTRFS_BLOCK_GROUP_RAID6)) {
+				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 					/*
 					 * loop until we find next data stripe
 					 * or we have finished all stripes.
@@ -3775,7 +3748,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 	scrub_workers_put(fs_info);
 	mutex_unlock(&fs_info->scrub_lock);
 
-	scrub_free_ctx(sctx);
+	scrub_put_ctx(sctx);
 
 	return ret;
 }
@@ -3881,14 +3854,14 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
 		      &mapped_length, &bbio, 0);
 	if (ret || !bbio || mapped_length < extent_len ||
 	    !bbio->stripes[0].dev->bdev) {
-		kfree(bbio);
+		btrfs_put_bbio(bbio);
 		return;
 	}
 
 	*extent_physical = bbio->stripes[0].physical;
 	*extent_mirror_num = bbio->mirror_num;
 	*extent_dev = bbio->stripes[0].dev;
-	kfree(bbio);
+	btrfs_put_bbio(bbio);
 }
 
 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 804432dbc351..fe5857223515 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -2471,12 +2471,9 @@ verbose_printk("btrfs: send_utimes %llu\n", ino);
 	if (ret < 0)
 		goto out;
 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
-	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb,
-			btrfs_inode_atime(ii));
-	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb,
-			btrfs_inode_mtime(ii));
-	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb,
-			btrfs_inode_ctime(ii));
+	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
+	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
+	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
 	/* TODO Add otime support when the otime patches get into upstream */
 
 	ret = send_cmd(sctx);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 6f49b2872a64..05fef198ff94 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1958,11 +1958,6 @@ static int btrfs_freeze(struct super_block *sb)
 	return btrfs_commit_transaction(trans, root);
 }
 
-static int btrfs_unfreeze(struct super_block *sb)
-{
-	return 0;
-}
-
 static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
@@ -2011,7 +2006,6 @@ static const struct super_operations btrfs_super_ops = {
 	.statfs		= btrfs_statfs,
 	.remount_fs	= btrfs_remount,
 	.freeze_fs	= btrfs_freeze,
-	.unfreeze_fs	= btrfs_unfreeze,
 };
 
 static const struct file_operations btrfs_ctl_fops = {
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 92db3f648df4..94edb0a2a026 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -733,10 +733,18 @@ int btrfs_init_sysfs(void)
 
 	ret = btrfs_init_debugfs();
 	if (ret)
-		return ret;
+		goto out1;
 
 	init_feature_attrs();
 	ret = sysfs_create_group(&btrfs_kset->kobj, &btrfs_feature_attr_group);
+	if (ret)
+		goto out2;
+
+	return 0;
+out2:
+	debugfs_remove_recursive(btrfs_debugfs_root_dentry);
+out1:
+	kset_unregister(btrfs_kset);
 
 	return ret;
 }
diff --git a/fs/btrfs/tests/extent-buffer-tests.c b/fs/btrfs/tests/extent-buffer-tests.c
index cc286ce97d1e..f51963a8f929 100644
--- a/fs/btrfs/tests/extent-buffer-tests.c
+++ b/fs/btrfs/tests/extent-buffer-tests.c
@@ -53,7 +53,7 @@ static int test_btrfs_split_item(void)
 		return -ENOMEM;
 	}
 
-	path->nodes[0] = eb = alloc_dummy_extent_buffer(0, 4096);
+	path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, 4096);
 	if (!eb) {
 		test_msg("Could not allocate dummy buffer\n");
 		ret = -ENOMEM;
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index 7e99c2f98dd0..9e9f2368177d 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -258,8 +258,7 @@ static int test_find_delalloc(void)
 	}
 	ret = 0;
 out_bits:
-	clear_extent_bits(&tmp, 0, total_dirty - 1,
-			  (unsigned long)-1, GFP_NOFS);
+	clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_NOFS);
 out:
 	if (locked_page)
 		page_cache_release(locked_page);
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 3ae0f5b8bb80..a116b55ce788 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -255,7 +255,7 @@ static noinline int test_btrfs_get_extent(void)
 		goto out;
 	}
 
-	root->node = alloc_dummy_extent_buffer(0, 4096);
+	root->node = alloc_dummy_extent_buffer(NULL, 4096);
 	if (!root->node) {
 		test_msg("Couldn't allocate dummy buffer\n");
 		goto out;
@@ -843,7 +843,7 @@ static int test_hole_first(void)
 		goto out;
 	}
 
-	root->node = alloc_dummy_extent_buffer(0, 4096);
+	root->node = alloc_dummy_extent_buffer(NULL, 4096);
 	if (!root->node) {
 		test_msg("Couldn't allocate dummy buffer\n");
 		goto out;
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index ec3dcb202357..73f299ebdabb 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -404,12 +404,22 @@ int btrfs_test_qgroups(void)
 		ret = -ENOMEM;
 		goto out;
 	}
+	/* We are using this root as our extent root */
+	root->fs_info->extent_root = root;
+
+	/*
+	 * Some of the paths we test assume we have a filled out fs_info, so we
+	 * just need to add the root in there so we don't panic.
+	 */
+	root->fs_info->tree_root = root;
+	root->fs_info->quota_root = root;
+	root->fs_info->quota_enabled = 1;
 
 	/*
 	 * Can't use bytenr 0, some things freak out
 	 * *cough*backref walking code*cough*
 	 */
-	root->node = alloc_test_extent_buffer(root->fs_info, 4096, 4096);
+	root->node = alloc_test_extent_buffer(root->fs_info, 4096);
 	if (!root->node) {
 		test_msg("Couldn't allocate dummy buffer\n");
 		ret = -ENOMEM;
@@ -448,17 +458,6 @@ int btrfs_test_qgroups(void)
 		goto out;
 	}
 
-	/* We are using this root as our extent root */
-	root->fs_info->extent_root = root;
-
-	/*
-	 * Some of the paths we test assume we have a filled out fs_info, so we
-	 * just need to addt he root in there so we don't panic.
-	 */
-	root->fs_info->tree_root = root;
-	root->fs_info->quota_root = root;
-	root->fs_info->quota_enabled = 1;
-
 	test_msg("Running qgroup tests\n");
 	ret = test_no_shared_qgroup(root);
 	if (ret)
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index e88b59d13439..7e80f32550a6 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -220,6 +220,7 @@ loop:
 	 * commit the transaction.
 	 */
 	atomic_set(&cur_trans->use_count, 2);
+	cur_trans->have_free_bgs = 0;
 	cur_trans->start_time = get_seconds();
 
 	cur_trans->delayed_refs.href_root = RB_ROOT;
@@ -248,6 +249,8 @@ loop:
 	INIT_LIST_HEAD(&cur_trans->pending_chunks);
 	INIT_LIST_HEAD(&cur_trans->switch_commits);
 	INIT_LIST_HEAD(&cur_trans->pending_ordered);
+	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
+	spin_lock_init(&cur_trans->dirty_bgs_lock);
 	list_add_tail(&cur_trans->list, &fs_info->trans_list);
 	extent_io_tree_init(&cur_trans->dirty_pages,
 			     fs_info->btree_inode->i_mapping);
@@ -1020,6 +1023,7 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
 	u64 old_root_bytenr;
 	u64 old_root_used;
 	struct btrfs_root *tree_root = root->fs_info->tree_root;
+	bool extent_root = (root->objectid == BTRFS_EXTENT_TREE_OBJECTID);
 
 	old_root_used = btrfs_root_used(&root->root_item);
 	btrfs_write_dirty_block_groups(trans, root);
@@ -1027,7 +1031,9 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
 	while (1) {
 		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
 		if (old_root_bytenr == root->node->start &&
-		    old_root_used == btrfs_root_used(&root->root_item))
+		    old_root_used == btrfs_root_used(&root->root_item) &&
+		    (!extent_root ||
+		     list_empty(&trans->transaction->dirty_bgs)))
 			break;
 
 		btrfs_set_root_node(&root->root_item, root->node);
@@ -1038,7 +1044,15 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
 			return ret;
 
 		old_root_used = btrfs_root_used(&root->root_item);
-		ret = btrfs_write_dirty_block_groups(trans, root);
+		if (extent_root) {
+			ret = btrfs_write_dirty_block_groups(trans, root);
+			if (ret)
+				return ret;
+		}
+		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+		if (ret)
+			return ret;
+		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 		if (ret)
 			return ret;
 	}
@@ -1061,10 +1075,6 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
 	struct extent_buffer *eb;
 	int ret;
 
-	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
-	if (ret)
-		return ret;
-
 	eb = btrfs_lock_root_node(fs_info->tree_root);
 	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
 			      0, &eb);
@@ -1097,6 +1107,7 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
 		next = fs_info->dirty_cowonly_roots.next;
 		list_del_init(next);
 		root = list_entry(next, struct btrfs_root, dirty_list);
+		clear_bit(BTRFS_ROOT_DIRTY, &root->state);
 
 		if (root != fs_info->extent_root)
 			list_add_tail(&root->dirty_list,
@@ -1983,6 +1994,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	switch_commit_roots(cur_trans, root->fs_info);
 
 	assert_qgroups_uptodate(trans);
+	ASSERT(list_empty(&cur_trans->dirty_bgs));
 	update_super_roots(root);
 
 	btrfs_set_super_log_root(root->fs_info->super_copy, 0);
@@ -2026,6 +2038,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
 	btrfs_finish_extent_commit(trans, root);
 
+	if (cur_trans->have_free_bgs)
+		btrfs_clear_space_info_full(root->fs_info);
+
 	root->fs_info->last_trans_committed = cur_trans->transid;
 	/*
 	 * We needn't acquire the lock here because there is no other task
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 00ed29c4b3f9..937050a2b68e 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -47,6 +47,11 @@ struct btrfs_transaction {
 	atomic_t num_writers;
 	atomic_t use_count;
 
+	/*
+	 * true if there is free bgs operations in this transaction
+	 */
+	int have_free_bgs;
+
 	/* Be protected by fs_info->trans_lock when we want to change it. */
 	enum btrfs_trans_state state;
 	struct list_head list;
@@ -58,6 +63,8 @@ struct btrfs_transaction {
 	struct list_head pending_chunks;
 	struct list_head pending_ordered;
 	struct list_head switch_commits;
+	struct list_head dirty_bgs;
+	spinlock_t dirty_bgs_lock;
 	struct btrfs_delayed_ref_root delayed_refs;
 	int aborted;
 };
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 1a9585d4380a..9a37f8b39bae 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -453,11 +453,13 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
 insert:
 	btrfs_release_path(path);
 	/* try to insert the key into the destination tree */
+	path->skip_release_on_error = 1;
 	ret = btrfs_insert_empty_item(trans, root, path,
 				      key, item_size);
+	path->skip_release_on_error = 0;
 
 	/* make sure any existing item is the correct size */
-	if (ret == -EEXIST) {
+	if (ret == -EEXIST || ret == -EOVERFLOW) {
 		u32 found_size;
 		found_size = btrfs_item_size_nr(path->nodes[0],
 						path->slots[0]);
@@ -488,8 +490,20 @@ insert:
 		src_item = (struct btrfs_inode_item *)src_ptr;
 		dst_item = (struct btrfs_inode_item *)dst_ptr;
 
-		if (btrfs_inode_generation(eb, src_item) == 0)
+		if (btrfs_inode_generation(eb, src_item) == 0) {
+			struct extent_buffer *dst_eb = path->nodes[0];
+
+			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
+			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item))) {
+				struct btrfs_map_token token;
+				u64 ino_size = btrfs_inode_size(eb, src_item);
+
+				btrfs_init_map_token(&token);
+				btrfs_set_token_inode_size(dst_eb, dst_item,
+							   ino_size, &token);
+			}
 			goto no_copy;
+		}
 
 		if (overwrite_root &&
 		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
@@ -844,7 +858,7 @@ out:
 static noinline int backref_in_log(struct btrfs_root *log,
 				   struct btrfs_key *key,
 				   u64 ref_objectid,
-				   char *name, int namelen)
+				   const char *name, int namelen)
 {
 	struct btrfs_path *path;
 	struct btrfs_inode_ref *ref;
@@ -1254,13 +1268,14 @@ out:
 }
 
 static int insert_orphan_item(struct btrfs_trans_handle *trans,
-			      struct btrfs_root *root, u64 offset)
+			      struct btrfs_root *root, u64 ino)
 {
 	int ret;
-	ret = btrfs_find_item(root, NULL, BTRFS_ORPHAN_OBJECTID,
-			offset, BTRFS_ORPHAN_ITEM_KEY, NULL);
-	if (ret > 0)
-		ret = btrfs_insert_orphan_item(trans, root, offset);
+
+	ret = btrfs_insert_orphan_item(trans, root, ino);
+	if (ret == -EEXIST)
+		ret = 0;
+
 	return ret;
 }
 
@@ -1287,6 +1302,7 @@ static int count_inode_extrefs(struct btrfs_root *root,
 		leaf = path->nodes[0];
 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+		cur_offset = 0;
 
 		while (cur_offset < item_size) {
 			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
@@ -1302,7 +1318,7 @@ static int count_inode_extrefs(struct btrfs_root *root,
 	}
 	btrfs_release_path(path);
 
-	if (ret < 0)
+	if (ret < 0 && ret != -ENOENT)
 		return ret;
 	return nlink;
 }
@@ -1394,9 +1410,6 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
 	nlink = ret;
 
 	ret = count_inode_extrefs(root, inode, path);
-	if (ret == -ENOENT)
-		ret = 0;
-
 	if (ret < 0)
 		goto out;
 
@@ -1557,6 +1570,30 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans,
 }
 
 /*
+ * Return true if an inode reference exists in the log for the given name,
+ * inode and parent inode.
+ */
+static bool name_in_log_ref(struct btrfs_root *log_root,
+			    const char *name, const int name_len,
+			    const u64 dirid, const u64 ino)
+{
+	struct btrfs_key search_key;
+
+	search_key.objectid = ino;
+	search_key.type = BTRFS_INODE_REF_KEY;
+	search_key.offset = dirid;
+	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
+		return true;
+
+	search_key.type = BTRFS_INODE_EXTREF_KEY;
+	search_key.offset = btrfs_extref_hash(dirid, name, name_len);
+	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
+		return true;
+
+	return false;
+}
+
+/*
  * take a single entry in a log directory item and replay it into
  * the subvolume.
  *
@@ -1666,10 +1703,17 @@ out:
 	return ret;
 
 insert:
+	if (name_in_log_ref(root->log_root, name, name_len,
+			    key->objectid, log_key.objectid)) {
+		/* The dentry will be added later. */
+		ret = 0;
+		update_size = false;
+		goto out;
+	}
 	btrfs_release_path(path);
 	ret = insert_one_name(trans, root, path, key->objectid, key->offset,
 			      name, name_len, log_type, &log_key);
-	if (ret && ret != -ENOENT)
+	if (ret && ret != -ENOENT && ret != -EEXIST)
 		goto out;
 	update_size = false;
 	ret = 0;
@@ -2164,7 +2208,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
 		parent = path->nodes[*level];
 		root_owner = btrfs_header_owner(parent);
 
-		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
+		next = btrfs_find_create_tree_block(root, bytenr);
 		if (!next)
 			return -ENOMEM;
 
@@ -2416,8 +2460,8 @@ static void wait_for_writer(struct btrfs_trans_handle *trans,
 		mutex_unlock(&root->log_mutex);
 		if (atomic_read(&root->log_writers))
 			schedule();
-		mutex_lock(&root->log_mutex);
 		finish_wait(&root->log_writer_wait, &wait);
+		mutex_lock(&root->log_mutex);
 	}
 }
 
@@ -3219,7 +3263,8 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
 static void fill_inode_item(struct btrfs_trans_handle *trans,
 			    struct extent_buffer *leaf,
 			    struct btrfs_inode_item *item,
-			    struct inode *inode, int log_inode_only)
+			    struct inode *inode, int log_inode_only,
+			    u64 logged_isize)
 {
 	struct btrfs_map_token token;
 
@@ -3232,7 +3277,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
 		 * to say 'update this inode with these values'
 		 */
 		btrfs_set_token_inode_generation(leaf, item, 0, &token);
-		btrfs_set_token_inode_size(leaf, item, 0, &token);
+		btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
 	} else {
 		btrfs_set_token_inode_generation(leaf, item,
 						 BTRFS_I(inode)->generation,
@@ -3245,19 +3290,19 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
 	btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
 	btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
 
-	btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
+	btrfs_set_token_timespec_sec(leaf, &item->atime,
 				     inode->i_atime.tv_sec, &token);
-	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
+	btrfs_set_token_timespec_nsec(leaf, &item->atime,
 				      inode->i_atime.tv_nsec, &token);
 
-	btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
+	btrfs_set_token_timespec_sec(leaf, &item->mtime,
 				     inode->i_mtime.tv_sec, &token);
-	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
+	btrfs_set_token_timespec_nsec(leaf, &item->mtime,
 				      inode->i_mtime.tv_nsec, &token);
 
-	btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
+	btrfs_set_token_timespec_sec(leaf, &item->ctime,
 				     inode->i_ctime.tv_sec, &token);
-	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
+	btrfs_set_token_timespec_nsec(leaf, &item->ctime,
 				      inode->i_ctime.tv_nsec, &token);
 
 	btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
@@ -3284,7 +3329,7 @@ static int log_inode_item(struct btrfs_trans_handle *trans,
 		return ret;
 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
 				    struct btrfs_inode_item);
-	fill_inode_item(trans, path->nodes[0], inode_item, inode, 0);
+	fill_inode_item(trans, path->nodes[0], inode_item, inode, 0, 0);
 	btrfs_release_path(path);
 	return 0;
 }
@@ -3293,7 +3338,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
 			       struct inode *inode,
 			       struct btrfs_path *dst_path,
 			       struct btrfs_path *src_path, u64 *last_extent,
-			       int start_slot, int nr, int inode_only)
+			       int start_slot, int nr, int inode_only,
+			       u64 logged_isize)
 {
 	unsigned long src_offset;
 	unsigned long dst_offset;
@@ -3350,7 +3396,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
 						    dst_path->slots[0],
 						    struct btrfs_inode_item);
 			fill_inode_item(trans, dst_path->nodes[0], inode_item,
-					inode, inode_only == LOG_INODE_EXISTS);
+					inode, inode_only == LOG_INODE_EXISTS,
+					logged_isize);
 		} else {
 			copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
 					   src_offset, ins_sizes[i]);
@@ -3902,6 +3949,33 @@ process:
 	return ret;
 }
 
+static int logged_inode_size(struct btrfs_root *log, struct inode *inode,
+			     struct btrfs_path *path, u64 *size_ret)
+{
+	struct btrfs_key key;
+	int ret;
+
+	key.objectid = btrfs_ino(inode);
+	key.type = BTRFS_INODE_ITEM_KEY;
+	key.offset = 0;
+
+	ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
+	if (ret < 0) {
+		return ret;
+	} else if (ret > 0) {
+		*size_ret = i_size_read(inode);
+	} else {
+		struct btrfs_inode_item *item;
+
+		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+				      struct btrfs_inode_item);
+		*size_ret = btrfs_inode_size(path->nodes[0], item);
+	}
+
+	btrfs_release_path(path);
+	return 0;
+}
+
 /* log a single inode in the tree log.
  * At least one parent directory for this inode must exist in the tree
  * or be logged already.
@@ -3939,6 +4013,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 	bool fast_search = false;
 	u64 ino = btrfs_ino(inode);
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	u64 logged_isize = 0;
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -3966,15 +4041,22 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 	max_key.type = (u8)-1;
 	max_key.offset = (u64)-1;
 
-	/* Only run delayed items if we are a dir or a new file */
+	/*
+	 * Only run delayed items if we are a dir or a new file.
+	 * Otherwise commit the delayed inode only, which is needed in
+	 * order for the log replay code to mark inodes for link count
+	 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
+	 */
 	if (S_ISDIR(inode->i_mode) ||
-	    BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) {
-		ret = btrfs_commit_inode_delayed_items(trans, inode);
-		if (ret) {
-			btrfs_free_path(path);
-			btrfs_free_path(dst_path);
-			return ret;
-		}
+	    BTRFS_I(inode)->generation > root->fs_info->last_trans_committed)
+		ret = btrfs_commit_inode_delayed_items(trans, inode);
+	else
+		ret = btrfs_commit_inode_delayed_inode(inode);
+
+	if (ret) {
+		btrfs_free_path(path);
+		btrfs_free_path(dst_path);
+		return ret;
 	}
 
 	mutex_lock(&BTRFS_I(inode)->log_mutex);
@@ -3988,22 +4070,56 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 	if (S_ISDIR(inode->i_mode)) {
 		int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
 
-		if (inode_only == LOG_INODE_EXISTS)
-			max_key_type = BTRFS_XATTR_ITEM_KEY;
+		if (inode_only == LOG_INODE_EXISTS) {
+			max_key_type = BTRFS_INODE_EXTREF_KEY;
+			max_key.type = max_key_type;
+		}
 		ret = drop_objectid_items(trans, log, path, ino, max_key_type);
 	} else {
-		if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
-				       &BTRFS_I(inode)->runtime_flags)) {
-			clear_bit(BTRFS_INODE_COPY_EVERYTHING,
-				  &BTRFS_I(inode)->runtime_flags);
-			ret = btrfs_truncate_inode_items(trans, log,
-							 inode, 0, 0);
-		} else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
-					      &BTRFS_I(inode)->runtime_flags) ||
+		if (inode_only == LOG_INODE_EXISTS) {
+			/*
+			 * Make sure the new inode item we write to the log has
+			 * the same isize as the current one (if it exists).
+			 * This is necessary to prevent data loss after log
+			 * replay, and also to prevent doing a wrong expanding
+			 * truncate - for e.g. create file, write 4K into offset
+			 * 0, fsync, write 4K into offset 4096, add hard link,
+			 * fsync some other file (to sync log), power fail - if
+			 * we use the inode's current i_size, after log replay
+			 * we get a 8Kb file, with the last 4Kb extent as a hole
+			 * (zeroes), as if an expanding truncate happened,
+			 * instead of getting a file of 4Kb only.
+			 */
+			err = logged_inode_size(log, inode, path,
+						&logged_isize);
+			if (err)
+				goto out_unlock;
+		}
+		if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+			     &BTRFS_I(inode)->runtime_flags)) {
+			if (inode_only == LOG_INODE_EXISTS) {
+				max_key.type = BTRFS_INODE_EXTREF_KEY;
+				ret = drop_objectid_items(trans, log, path, ino,
+							  max_key.type);
+			} else {
+				clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+					  &BTRFS_I(inode)->runtime_flags);
+				clear_bit(BTRFS_INODE_COPY_EVERYTHING,
+					  &BTRFS_I(inode)->runtime_flags);
+				ret = btrfs_truncate_inode_items(trans, log,
+								 inode, 0, 0);
+			}
+		} else if (test_bit(BTRFS_INODE_COPY_EVERYTHING,
+				    &BTRFS_I(inode)->runtime_flags) ||
 			   inode_only == LOG_INODE_EXISTS) {
-			if (inode_only == LOG_INODE_ALL)
+			if (inode_only == LOG_INODE_ALL) {
+				clear_bit(BTRFS_INODE_COPY_EVERYTHING,
+					  &BTRFS_I(inode)->runtime_flags);
 				fast_search = true;
 				max_key.type = BTRFS_XATTR_ITEM_KEY;
+			} else {
+				max_key.type = BTRFS_INODE_EXTREF_KEY;
+			}
 			ret = drop_objectid_items(trans, log, path, ino,
 						  max_key.type);
 		} else {
@@ -4047,7 +4163,8 @@ again:
 		}
 
 		ret = copy_items(trans, inode, dst_path, path, &last_extent,
-				 ins_start_slot, ins_nr, inode_only);
+				 ins_start_slot, ins_nr, inode_only,
+				 logged_isize);
 		if (ret < 0) {
 			err = ret;
 			goto out_unlock;
@@ -4071,7 +4188,7 @@ next_slot:
 		if (ins_nr) {
 			ret = copy_items(trans, inode, dst_path, path,
 					 &last_extent, ins_start_slot,
-					 ins_nr, inode_only);
+					 ins_nr, inode_only, logged_isize);
 			if (ret < 0) {
 				err = ret;
 				goto out_unlock;
@@ -4092,7 +4209,8 @@ next_slot:
 	}
 	if (ins_nr) {
 		ret = copy_items(trans, inode, dst_path, path, &last_extent,
-				 ins_start_slot, ins_nr, inode_only);
+				 ins_start_slot, ins_nr, inode_only,
+				 logged_isize);
 		if (ret < 0) {
 			err = ret;
 			goto out_unlock;
@@ -4273,6 +4391,9 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 	struct dentry *old_parent = NULL;
 	int ret = 0;
 	u64 last_committed = root->fs_info->last_trans_committed;
+	const struct dentry * const first_parent = parent;
+	const bool did_unlink = (BTRFS_I(inode)->last_unlink_trans >
+				 last_committed);
 
 	sb = inode->i_sb;
 
@@ -4328,7 +4449,6 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 		goto end_trans;
 	}
 
-	inode_only = LOG_INODE_EXISTS;
 	while (1) {
 		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
 			break;
@@ -4337,8 +4457,22 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 		if (root != BTRFS_I(inode)->root)
 			break;
 
+		/*
+		 * On unlink we must make sure our immediate parent directory
+		 * inode is fully logged. This is to prevent leaving dangling
+		 * directory index entries and a wrong directory inode's i_size.
+		 * Not doing so can result in a directory being impossible to
+		 * delete after log replay (rmdir will always fail with error
+		 * -ENOTEMPTY).
+		 */
+		if (did_unlink && parent == first_parent)
+			inode_only = LOG_INODE_ALL;
+		else
+			inode_only = LOG_INODE_EXISTS;
+
 		if (BTRFS_I(inode)->generation >
-		    root->fs_info->last_trans_committed) {
+		    root->fs_info->last_trans_committed ||
+		    inode_only == LOG_INODE_ALL) {
 			ret = btrfs_log_inode(trans, root, inode, inode_only,
 					      0, LLONG_MAX, ctx);
 			if (ret)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 50c5a8762aed..8222f6f74147 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1310,6 +1310,8 @@ again:
 	if (ret) {
 		btrfs_error(root->fs_info, ret,
 			    "Failed to remove dev extent item");
+	} else {
+		trans->transaction->have_free_bgs = 1;
 	}
 out:
 	btrfs_free_path(path);
@@ -4196,7 +4198,7 @@ static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
 
 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
 {
-	if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
+	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
 		return;
 
 	btrfs_set_fs_incompat(info, RAID56);
@@ -4803,10 +4805,8 @@ unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
 
 	BUG_ON(em->start > logical || em->start + em->len < logical);
 	map = (struct map_lookup *)em->bdev;
-	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-			 BTRFS_BLOCK_GROUP_RAID6)) {
+	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
 		len = map->stripe_len * nr_data_stripes(map);
-	}
 	free_extent_map(em);
 	return len;
 }
@@ -4826,8 +4826,7 @@ int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
 
 	BUG_ON(em->start > logical || em->start + em->len < logical);
 	map = (struct map_lookup *)em->bdev;
-	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-			 BTRFS_BLOCK_GROUP_RAID6))
+	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
 		ret = 1;
 	free_extent_map(em);
 	return ret;
@@ -4876,32 +4875,24 @@ static inline int parity_smaller(u64 a, u64 b)
 }
 
 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
-static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
+static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
 {
 	struct btrfs_bio_stripe s;
-	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
 	int i;
 	u64 l;
 	int again = 1;
-	int m;
 
 	while (again) {
 		again = 0;
-		for (i = 0; i < real_stripes - 1; i++) {
-			if (parity_smaller(raid_map[i], raid_map[i+1])) {
+		for (i = 0; i < num_stripes - 1; i++) {
+			if (parity_smaller(bbio->raid_map[i],
+					   bbio->raid_map[i+1])) {
 				s = bbio->stripes[i];
-				l = raid_map[i];
+				l = bbio->raid_map[i];
 				bbio->stripes[i] = bbio->stripes[i+1];
-				raid_map[i] = raid_map[i+1];
+				bbio->raid_map[i] = bbio->raid_map[i+1];
 				bbio->stripes[i+1] = s;
-				raid_map[i+1] = l;
-
-				if (bbio->tgtdev_map) {
-					m = bbio->tgtdev_map[i];
-					bbio->tgtdev_map[i] =
-						bbio->tgtdev_map[i + 1];
-					bbio->tgtdev_map[i + 1] = m;
-				}
+				bbio->raid_map[i+1] = l;
 
 				again = 1;
 			}
@@ -4909,10 +4900,48 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
 	}
 }
 
+static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
+{
+	struct btrfs_bio *bbio = kzalloc(
+		/* the size of the btrfs_bio */
+		sizeof(struct btrfs_bio) +
+		/* plus the variable array for the stripes */
+		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
+		/* plus the variable array for the tgt dev */
+		sizeof(int) * (real_stripes) +
+		/*
+		 * plus the raid_map, which includes both the tgt dev
+		 * and the stripes
+		 */
+		sizeof(u64) * (total_stripes),
+		GFP_NOFS);
+	if (!bbio)
+		return NULL;
+
+	atomic_set(&bbio->error, 0);
+	atomic_set(&bbio->refs, 1);
+
+	return bbio;
+}
+
+void btrfs_get_bbio(struct btrfs_bio *bbio)
+{
+	WARN_ON(!atomic_read(&bbio->refs));
+	atomic_inc(&bbio->refs);
+}
+
+void btrfs_put_bbio(struct btrfs_bio *bbio)
+{
+	if (!bbio)
+		return;
+	if (atomic_dec_and_test(&bbio->refs))
+		kfree(bbio);
+}
+
 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 			     u64 logical, u64 *length,
 			     struct btrfs_bio **bbio_ret,
-			     int mirror_num, u64 **raid_map_ret)
+			     int mirror_num, int need_raid_map)
 {
 	struct extent_map *em;
 	struct map_lookup *map;
@@ -4925,7 +4954,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 	u64 stripe_nr_orig;
 	u64 stripe_nr_end;
 	u64 stripe_len;
-	u64 *raid_map = NULL;
 	int stripe_index;
 	int i;
 	int ret = 0;
@@ -4976,7 +5004,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 	stripe_offset = offset - stripe_offset;
 
 	/* if we're here for raid56, we need to know the stripe aligned start */
-	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
+	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
 		raid56_full_stripe_start = offset;
 
@@ -4989,8 +5017,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 
 	if (rw & REQ_DISCARD) {
 		/* we don't discard raid56 yet */
-		if (map->type &
-		    (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
+		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 			ret = -EOPNOTSUPP;
 			goto out;
 		}
@@ -5000,7 +5027,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 	/* For writes to RAID[56], allow a full stripeset across all disks.
 	   For other RAID types and for RAID[56] reads, just allow a single
 	   stripe (on a single disk). */
-	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
+	if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
 	    (rw & REQ_WRITE)) {
 		max_len = stripe_len * nr_data_stripes(map) -
 			  (offset - raid56_full_stripe_start);
@@ -5047,7 +5074,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 		u64 physical_of_found = 0;
 
 		ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
-				logical, &tmp_length, &tmp_bbio, 0, NULL);
+				logical, &tmp_length, &tmp_bbio, 0, 0);
 		if (ret) {
 			WARN_ON(tmp_bbio != NULL);
 			goto out;
@@ -5061,7 +5088,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 			 * is not left of the left cursor
 			 */
 			ret = -EIO;
-			kfree(tmp_bbio);
+			btrfs_put_bbio(tmp_bbio);
 			goto out;
 		}
 
@@ -5096,11 +5123,11 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 		} else {
 			WARN_ON(1);
 			ret = -EIO;
-			kfree(tmp_bbio);
+			btrfs_put_bbio(tmp_bbio);
 			goto out;
 		}
 
-		kfree(tmp_bbio);
+		btrfs_put_bbio(tmp_bbio);
 	} else if (mirror_num > map->num_stripes) {
 		mirror_num = 0;
 	}
@@ -5166,15 +5193,10 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 			mirror_num = stripe_index - old_stripe_index + 1;
 		}
 
-	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-				BTRFS_BLOCK_GROUP_RAID6)) {
-		u64 tmp;
-
-		if (raid_map_ret &&
+	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
+		if (need_raid_map &&
 		    ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
 		     mirror_num > 1)) {
-			int i, rot;
-
 			/* push stripe_nr back to the start of the full stripe */
 			stripe_nr = raid56_full_stripe_start;
 			do_div(stripe_nr, stripe_len * nr_data_stripes(map));
@@ -5183,32 +5205,12 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 			num_stripes = map->num_stripes;
 			max_errors = nr_parity_stripes(map);
 
-			raid_map = kmalloc_array(num_stripes, sizeof(u64),
-					   GFP_NOFS);
-			if (!raid_map) {
-				ret = -ENOMEM;
-				goto out;
-			}
-
-			/* Work out the disk rotation on this stripe-set */
-			tmp = stripe_nr;
-			rot = do_div(tmp, num_stripes);
-
-			/* Fill in the logical address of each stripe */
-			tmp = stripe_nr * nr_data_stripes(map);
-			for (i = 0; i < nr_data_stripes(map); i++)
-				raid_map[(i+rot) % num_stripes] =
-					em->start + (tmp + i) * map->stripe_len;
-
-			raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
-			if (map->type & BTRFS_BLOCK_GROUP_RAID6)
-				raid_map[(i+rot+1) % num_stripes] =
-					RAID6_Q_STRIPE;
-
 			*length = map->stripe_len;
 			stripe_index = 0;
 			stripe_offset = 0;
 		} else {
+			u64 tmp;
+
 			/*
 			 * Mirror #0 or #1 means the original data block.
 			 * Mirror #2 is RAID5 parity block.
@@ -5246,17 +5248,42 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 		tgtdev_indexes = num_stripes;
 	}
 
-	bbio = kzalloc(btrfs_bio_size(num_alloc_stripes, tgtdev_indexes),
-		       GFP_NOFS);
+	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
 	if (!bbio) {
-		kfree(raid_map);
 		ret = -ENOMEM;
 		goto out;
 	}
-	atomic_set(&bbio->error, 0);
 	if (dev_replace_is_ongoing)
 		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
 
+	/* build raid_map */
+	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
+	    need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
+	    mirror_num > 1)) {
+		u64 tmp;
+		int i, rot;
+
+		bbio->raid_map = (u64 *)((void *)bbio->stripes +
+				 sizeof(struct btrfs_bio_stripe) *
+				 num_alloc_stripes +
+				 sizeof(int) * tgtdev_indexes);
+
+		/* Work out the disk rotation on this stripe-set */
+		tmp = stripe_nr;
+		rot = do_div(tmp, num_stripes);
+
+		/* Fill in the logical address of each stripe */
+		tmp = stripe_nr * nr_data_stripes(map);
+		for (i = 0; i < nr_data_stripes(map); i++)
+			bbio->raid_map[(i+rot) % num_stripes] =
+				em->start + (tmp + i) * map->stripe_len;
+
+		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
+		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
+			bbio->raid_map[(i+rot+1) % num_stripes] =
+				RAID6_Q_STRIPE;
+	}
+
 	if (rw & REQ_DISCARD) {
 		int factor = 0;
 		int sub_stripes = 0;
@@ -5340,6 +5367,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
 		max_errors = btrfs_chunk_max_errors(map);
 
+	if (bbio->raid_map)
+		sort_parity_stripes(bbio, num_stripes);
+
 	tgtdev_indexes = 0;
 	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
 	    dev_replace->tgtdev != NULL) {
@@ -5427,6 +5457,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 	}
 
 	*bbio_ret = bbio;
+	bbio->map_type = map->type;
 	bbio->num_stripes = num_stripes;
 	bbio->max_errors = max_errors;
 	bbio->mirror_num = mirror_num;
@@ -5443,10 +5474,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
 		bbio->mirror_num = map->num_stripes + 1;
 	}
-	if (raid_map) {
-		sort_parity_stripes(bbio, raid_map);
-		*raid_map_ret = raid_map;
-	}
 out:
 	if (dev_replace_is_ongoing)
 		btrfs_dev_replace_unlock(dev_replace);
@@ -5459,17 +5486,17 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 		      struct btrfs_bio **bbio_ret, int mirror_num)
 {
 	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
-				 mirror_num, NULL);
+				 mirror_num, 0);
 }
 
 /* For Scrub/replace */
 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
 		     u64 logical, u64 *length,
 		     struct btrfs_bio **bbio_ret, int mirror_num,
-		     u64 **raid_map_ret)
+		     int need_raid_map)
 {
 	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
-				 mirror_num, raid_map_ret);
+				 mirror_num, need_raid_map);
 }
 
 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
@@ -5511,8 +5538,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 		do_div(length, map->num_stripes / map->sub_stripes);
 	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
 		do_div(length, map->num_stripes);
-	else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-			      BTRFS_BLOCK_GROUP_RAID6)) {
+	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 		do_div(length, nr_data_stripes(map));
 		rmap_len = map->stripe_len * nr_data_stripes(map);
 	}
@@ -5565,7 +5591,7 @@ static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int e
 		bio_endio_nodec(bio, err);
 	else
 		bio_endio(bio, err);
-	kfree(bbio);
+	btrfs_put_bbio(bbio);
 }
 
 static void btrfs_end_bio(struct bio *bio, int err)
@@ -5808,7 +5834,6 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
 	u64 length = 0;
 	u64 map_length;
-	u64 *raid_map = NULL;
 	int ret;
 	int dev_nr = 0;
 	int total_devs = 1;
@@ -5819,7 +5844,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 
 	btrfs_bio_counter_inc_blocked(root->fs_info);
 	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
-			      mirror_num, &raid_map);
+			      mirror_num, 1);
 	if (ret) {
 		btrfs_bio_counter_dec(root->fs_info);
 		return ret;
@@ -5832,15 +5857,13 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 	bbio->fs_info = root->fs_info;
 	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
 
-	if (raid_map) {
+	if (bbio->raid_map) {
 		/* In this case, map_length has been set to the length of
 		   a single stripe; not the whole write */
 		if (rw & WRITE) {
-			ret = raid56_parity_write(root, bio, bbio,
-						  raid_map, map_length);
+			ret = raid56_parity_write(root, bio, bbio, map_length);
 		} else {
-			ret = raid56_parity_recover(root, bio, bbio,
-						    raid_map, map_length,
+			ret = raid56_parity_recover(root, bio, bbio, map_length,
 						    mirror_num, 1);
 		}
 
@@ -6238,17 +6261,22 @@ int btrfs_read_sys_array(struct btrfs_root *root)
 	struct extent_buffer *sb;
 	struct btrfs_disk_key *disk_key;
 	struct btrfs_chunk *chunk;
-	u8 *ptr;
-	unsigned long sb_ptr;
+	u8 *array_ptr;
+	unsigned long sb_array_offset;
 	int ret = 0;
 	u32 num_stripes;
 	u32 array_size;
 	u32 len = 0;
-	u32 cur;
+	u32 cur_offset;
 	struct btrfs_key key;
 
-	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
-					  BTRFS_SUPER_INFO_SIZE);
+	ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
+	/*
+	 * This will create extent buffer of nodesize, superblock size is
+	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
+	 * overallocate but we can keep it as-is, only the first page is used.
+	 */
+	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
 	if (!sb)
 		return -ENOMEM;
 	btrfs_set_buffer_uptodate(sb);
@@ -6271,35 +6299,56 @@ int btrfs_read_sys_array(struct btrfs_root *root)
 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
 	array_size = btrfs_super_sys_array_size(super_copy);
 
-	ptr = super_copy->sys_chunk_array;
-	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
-	cur = 0;
+	array_ptr = super_copy->sys_chunk_array;
+	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
+	cur_offset = 0;
+
+	while (cur_offset < array_size) {
+		disk_key = (struct btrfs_disk_key *)array_ptr;
+		len = sizeof(*disk_key);
+		if (cur_offset + len > array_size)
+			goto out_short_read;
 
-	while (cur < array_size) {
-		disk_key = (struct btrfs_disk_key *)ptr;
 		btrfs_disk_key_to_cpu(&key, disk_key);
 
-		len = sizeof(*disk_key); ptr += len;
-		sb_ptr += len;
-		cur += len;
+		array_ptr += len;
+		sb_array_offset += len;
+		cur_offset += len;
 
 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
-			chunk = (struct btrfs_chunk *)sb_ptr;
+			chunk = (struct btrfs_chunk *)sb_array_offset;
+			/*
+			 * At least one btrfs_chunk with one stripe must be
+			 * present, exact stripe count check comes afterwards
+			 */
+			len = btrfs_chunk_item_size(1);
+			if (cur_offset + len > array_size)
+				goto out_short_read;
+
+			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
+			len = btrfs_chunk_item_size(num_stripes);
+			if (cur_offset + len > array_size)
+				goto out_short_read;
+
 			ret = read_one_chunk(root, &key, sb, chunk);
 			if (ret)
 				break;
-			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
-			len = btrfs_chunk_item_size(num_stripes);
 		} else {
 			ret = -EIO;
 			break;
 		}
-		ptr += len;
-		sb_ptr += len;
-		cur += len;
+		array_ptr += len;
+		sb_array_offset += len;
+		cur_offset += len;
 	}
 	free_extent_buffer(sb);
 	return ret;
+
+out_short_read:
+	printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
+			len, cur_offset);
+	free_extent_buffer(sb);
+	return -EIO;
 }
 
 int btrfs_read_chunk_tree(struct btrfs_root *root)
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index d6fe73c0f4a2..83069dec6898 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -295,8 +295,10 @@ typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err);
 #define BTRFS_BIO_ORIG_BIO_SUBMITTED	(1 << 0)
 
 struct btrfs_bio {
+	atomic_t		refs;
 	atomic_t		stripes_pending;
 	struct btrfs_fs_info	*fs_info;
+	u64			map_type; /* get from map_lookup->type */
 	bio_end_io_t		*end_io;
 	struct bio		*orig_bio;
 	unsigned long		flags;
@@ -307,6 +309,12 @@ struct btrfs_bio {
 	int			mirror_num;
 	int			num_tgtdevs;
 	int			*tgtdev_map;
+	/*
+	 * logical block numbers for the start of each stripe
+	 * The last one or two are p/q.  These are sorted,
+	 * so raid_map[0] is the start of our full stripe
+	 */
+	u64			*raid_map;
 	struct btrfs_bio_stripe	stripes[];
 };
 
@@ -388,19 +396,15 @@ struct btrfs_balance_control {
 
 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
 				   u64 end, u64 *length);
-
-#define btrfs_bio_size(total_stripes, real_stripes)		\
-	(sizeof(struct btrfs_bio) +				\
-	(sizeof(struct btrfs_bio_stripe) * (total_stripes)) +	\
-	(sizeof(int) * (real_stripes)))
-
+void btrfs_get_bbio(struct btrfs_bio *bbio);
+void btrfs_put_bbio(struct btrfs_bio *bbio);
 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 		    u64 logical, u64 *length,
 		    struct btrfs_bio **bbio_ret, int mirror_num);
 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
 		     u64 logical, u64 *length,
 		     struct btrfs_bio **bbio_ret, int mirror_num,
-		     u64 **raid_map_ret);
+		     int need_raid_map);
 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 		     u64 chunk_start, u64 physical, u64 devid,
 		     u64 **logical, int *naddrs, int *stripe_len);
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index ce1b115dcc28..f601def05bdf 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -574,7 +574,7 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
 	/* extract the directory dentry from the cwd */
 	get_fs_pwd(current->fs, &path);
 
-	if (!S_ISDIR(path.dentry->d_inode->i_mode))
+	if (!d_can_lookup(path.dentry))
 		goto notdir;
 
 	cachefiles_begin_secure(cache, &saved_cred);
@@ -646,7 +646,7 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
 	/* extract the directory dentry from the cwd */
 	get_fs_pwd(current->fs, &path);
 
-	if (!S_ISDIR(path.dentry->d_inode->i_mode))
+	if (!d_can_lookup(path.dentry))
 		goto notdir;
 
 	cachefiles_begin_secure(cache, &saved_cred);
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 1c7293c3a93a..232426214fdd 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -437,7 +437,7 @@ static int cachefiles_attr_changed(struct fscache_object *_object)
 	if (!object->backer)
 		return -ENOBUFS;
 
-	ASSERT(S_ISREG(object->backer->d_inode->i_mode));
+	ASSERT(d_is_reg(object->backer));
 
 	fscache_set_store_limit(&object->fscache, ni_size);
 
@@ -501,7 +501,7 @@ static void cachefiles_invalidate_object(struct fscache_operation *op)
 	       op->object->debug_id, (unsigned long long)ni_size);
 
 	if (object->backer) {
-		ASSERT(S_ISREG(object->backer->d_inode->i_mode));
+		ASSERT(d_is_reg(object->backer));
 
 		fscache_set_store_limit(&object->fscache, ni_size);
 
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 7f8e83f9d74e..1e51714eb33e 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -277,7 +277,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
 	_debug("remove %p from %p", rep, dir);
 
 	/* non-directories can just be unlinked */
-	if (!S_ISDIR(rep->d_inode->i_mode)) {
+	if (!d_is_dir(rep)) {
 		_debug("unlink stale object");
 
 		path.mnt = cache->mnt;
@@ -323,7 +323,7 @@ try_again:
 		return 0;
 	}
 
-	if (!S_ISDIR(cache->graveyard->d_inode->i_mode)) {
+	if (!d_can_lookup(cache->graveyard)) {
 		unlock_rename(cache->graveyard, dir);
 		cachefiles_io_error(cache, "Graveyard no longer a directory");
 		return -EIO;
@@ -475,7 +475,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
 	ASSERT(parent->dentry);
 	ASSERT(parent->dentry->d_inode);
 
-	if (!(S_ISDIR(parent->dentry->d_inode->i_mode))) {
+	if (!(d_is_dir(parent->dentry))) {
 		// TODO: convert file to dir
 		_leave("looking up in none directory");
 		return -ENOBUFS;
@@ -539,7 +539,7 @@ lookup_again:
 		_debug("mkdir -> %p{%p{ino=%lu}}",
 		       next, next->d_inode, next->d_inode->i_ino);
 
-	} else if (!S_ISDIR(next->d_inode->i_mode)) {
+	} else if (!d_can_lookup(next)) {
 		pr_err("inode %lu is not a directory\n",
 		       next->d_inode->i_ino);
 		ret = -ENOBUFS;
@@ -568,8 +568,8 @@ lookup_again:
 		_debug("create -> %p{%p{ino=%lu}}",
 		       next, next->d_inode, next->d_inode->i_ino);
 
-	} else if (!S_ISDIR(next->d_inode->i_mode) &&
-		   !S_ISREG(next->d_inode->i_mode)
+	} else if (!d_can_lookup(next) &&
+		   !d_is_reg(next)
 		   ) {
 		pr_err("inode %lu is not a file or directory\n",
 		       next->d_inode->i_ino);
@@ -642,7 +642,7 @@ lookup_again:
 
 	/* open a file interface onto a data file */
 	if (object->type != FSCACHE_COOKIE_TYPE_INDEX) {
-		if (S_ISREG(object->dentry->d_inode->i_mode)) {
+		if (d_is_reg(object->dentry)) {
 			const struct address_space_operations *aops;
 
 			ret = -EPERM;
@@ -763,7 +763,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
 	/* we need to make sure the subdir is a directory */
 	ASSERT(subdir->d_inode);
 
-	if (!S_ISDIR(subdir->d_inode->i_mode)) {
+	if (!d_can_lookup(subdir)) {
 		pr_err("%s is not a directory\n", dirname);
 		ret = -EIO;
 		goto check_error;
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 616db0e77b44..c6cd8d7a4eef 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -900,7 +900,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
 		return -ENOBUFS;
 	}
 
-	ASSERT(S_ISREG(object->backer->d_inode->i_mode));
+	ASSERT(d_is_reg(object->backer));
 
 	cache = container_of(object->fscache.cache,
 			     struct cachefiles_cache, cache);
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index 5bd853ba44ff..64fa248343f6 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -40,20 +40,6 @@ static inline void ceph_set_cached_acl(struct inode *inode,
 	spin_unlock(&ci->i_ceph_lock);
 }
 
-static inline struct posix_acl *ceph_get_cached_acl(struct inode *inode,
-							int type)
-{
-	struct ceph_inode_info *ci = ceph_inode(inode);
-	struct posix_acl *acl = ACL_NOT_CACHED;
-
-	spin_lock(&ci->i_ceph_lock);
-	if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0))
-		acl = get_cached_acl(inode, type);
-	spin_unlock(&ci->i_ceph_lock);
-
-	return acl;
-}
-
 struct posix_acl *ceph_get_acl(struct inode *inode, int type)
 {
 	int size;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 24be059fd1f8..fd5599d32362 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -196,17 +196,22 @@ static int readpage_nounlock(struct file *filp, struct page *page)
 	u64 len = PAGE_CACHE_SIZE;
 
 	if (off >= i_size_read(inode)) {
-		zero_user_segment(page, err, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
 		SetPageUptodate(page);
 		return 0;
 	}
 
-	/*
-	 * Uptodate inline data should have been added into page cache
-	 * while getting Fcr caps.
-	 */
-	if (ci->i_inline_version != CEPH_INLINE_NONE)
-		return -EINVAL;
+	if (ci->i_inline_version != CEPH_INLINE_NONE) {
+		/*
+		 * Uptodate inline data should have been added
+		 * into page cache while getting Fcr caps.
+		 */
+		if (off == 0)
+			return -EINVAL;
+		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		SetPageUptodate(page);
+		return 0;
+	}
 
 	err = ceph_readpage_from_fscache(inode, page);
 	if (err == 0)
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index b93c631c6c87..8172775428a0 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -577,7 +577,6 @@ void ceph_add_cap(struct inode *inode,
 		struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
 							       realmino);
 		if (realm) {
-			ceph_get_snap_realm(mdsc, realm);
 			spin_lock(&realm->inodes_with_caps_lock);
 			ci->i_snap_realm = realm;
 			list_add(&ci->i_snap_realm_item,
@@ -1451,8 +1450,8 @@ static int __mark_caps_flushing(struct inode *inode,
 	spin_lock(&mdsc->cap_dirty_lock);
 	list_del_init(&ci->i_dirty_item);
 
-	ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
 	if (list_empty(&ci->i_flushing_item)) {
+		ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
 		list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
 		mdsc->num_cap_flushing++;
 		dout(" inode %p now flushing seq %lld\n", inode,
@@ -2073,17 +2072,16 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got)
  * requested from the MDS.
  */
 static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
-			    loff_t endoff, int *got, struct page **pinned_page,
-			    int *check_max, int *err)
+			    loff_t endoff, int *got, int *check_max, int *err)
 {
 	struct inode *inode = &ci->vfs_inode;
 	int ret = 0;
-	int have, implemented, _got = 0;
+	int have, implemented;
 	int file_wanted;
 
 	dout("get_cap_refs %p need %s want %s\n", inode,
 	     ceph_cap_string(need), ceph_cap_string(want));
-again:
+
 	spin_lock(&ci->i_ceph_lock);
 
 	/* make sure file is actually open */
@@ -2138,50 +2136,34 @@ again:
 		     inode, ceph_cap_string(have), ceph_cap_string(not),
 		     ceph_cap_string(revoking));
 		if ((revoking & not) == 0) {
-			_got = need | (have & want);
-			__take_cap_refs(ci, _got);
+			*got = need | (have & want);
+			__take_cap_refs(ci, *got);
 			ret = 1;
 		}
 	} else {
+		int session_readonly = false;
+		if ((need & CEPH_CAP_FILE_WR) && ci->i_auth_cap) {
+			struct ceph_mds_session *s = ci->i_auth_cap->session;
+			spin_lock(&s->s_cap_lock);
+			session_readonly = s->s_readonly;
+			spin_unlock(&s->s_cap_lock);
+		}
+		if (session_readonly) {
+			dout("get_cap_refs %p needed %s but mds%d readonly\n",
+			     inode, ceph_cap_string(need), ci->i_auth_cap->mds);
+			*err = -EROFS;
+			ret = 1;
+			goto out_unlock;
+		}
+
 		dout("get_cap_refs %p have %s needed %s\n", inode,
 		     ceph_cap_string(have), ceph_cap_string(need));
 	}
 out_unlock:
 	spin_unlock(&ci->i_ceph_lock);
 
-	if (ci->i_inline_version != CEPH_INLINE_NONE &&
-	    (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
-	    i_size_read(inode) > 0) {
-		int ret1;
-		struct page *page = find_get_page(inode->i_mapping, 0);
-		if (page) {
-			if (PageUptodate(page)) {
-				*pinned_page = page;
-				goto out;
-			}
-			page_cache_release(page);
-		}
-		/*
-		 * drop cap refs first because getattr while holding
-		 * caps refs can cause deadlock.
-		 */
-		ceph_put_cap_refs(ci, _got);
-		_got = 0;
-
-		/* getattr request will bring inline data into page cache */
-		ret1 = __ceph_do_getattr(inode, NULL,
-					 CEPH_STAT_CAP_INLINE_DATA, true);
-		if (ret1 >= 0) {
-			ret = 0;
-			goto again;
-		}
-		*err = ret1;
-		ret = 1;
-	}
-out:
 	dout("get_cap_refs %p ret %d got %s\n", inode,
-	     ret, ceph_cap_string(_got));
-	*got = _got;
+	     ret, ceph_cap_string(*got));
 	return ret;
 }
 
@@ -2221,22 +2203,52 @@ static void check_max_size(struct inode *inode, loff_t endoff)
 int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
 		  loff_t endoff, int *got, struct page **pinned_page)
 {
-	int check_max, ret, err;
+	int _got, check_max, ret, err = 0;
 
 retry:
 	if (endoff > 0)
 		check_max_size(&ci->vfs_inode, endoff);
+	_got = 0;
 	check_max = 0;
-	err = 0;
 	ret = wait_event_interruptible(ci->i_cap_wq,
 				       try_get_cap_refs(ci, need, want, endoff,
-							got, pinned_page,
-							&check_max, &err));
+							&_got, &check_max, &err));
 	if (err)
 		ret = err;
+	if (ret < 0)
+		return ret;
+
 	if (check_max)
 		goto retry;
-	return ret;
+
+	if (ci->i_inline_version != CEPH_INLINE_NONE &&
+	    (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
+	    i_size_read(&ci->vfs_inode) > 0) {
+		struct page *page = find_get_page(ci->vfs_inode.i_mapping, 0);
+		if (page) {
+			if (PageUptodate(page)) {
+				*pinned_page = page;
+				goto out;
+			}
+			page_cache_release(page);
+		}
+		/*
+		 * drop cap refs first because getattr while holding
+		 * caps refs can cause deadlock.
+		 */
+		ceph_put_cap_refs(ci, _got);
+		_got = 0;
+
+		/* getattr request will bring inline data into page cache */
+		ret = __ceph_do_getattr(&ci->vfs_inode, NULL,
+					CEPH_STAT_CAP_INLINE_DATA, true);
+		if (ret < 0)
+			return ret;
+		goto retry;
+	}
+out:
+	*got = _got;
+	return 0;
 }
 
 /*
@@ -2432,13 +2444,13 @@ static void invalidate_aliases(struct inode *inode)
  */
 static void handle_cap_grant(struct ceph_mds_client *mdsc,
 			     struct inode *inode, struct ceph_mds_caps *grant,
-			     void *snaptrace, int snaptrace_len,
 			     u64 inline_version,
 			     void *inline_data, int inline_len,
 			     struct ceph_buffer *xattr_buf,
 			     struct ceph_mds_session *session,
 			     struct ceph_cap *cap, int issued)
 	__releases(ci->i_ceph_lock)
+	__releases(mdsc->snap_rwsem)
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	int mds = session->s_mds;
@@ -2639,10 +2651,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
 	spin_unlock(&ci->i_ceph_lock);
 
 	if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
-		down_write(&mdsc->snap_rwsem);
-		ceph_update_snap_trace(mdsc, snaptrace,
-				       snaptrace + snaptrace_len, false);
-		downgrade_write(&mdsc->snap_rwsem);
 		kick_flushing_inode_caps(mdsc, session, inode);
 		up_read(&mdsc->snap_rwsem);
 		if (newcaps & ~issued)
@@ -3052,6 +3060,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 	struct ceph_cap *cap;
 	struct ceph_mds_caps *h;
 	struct ceph_mds_cap_peer *peer = NULL;
+	struct ceph_snap_realm *realm;
 	int mds = session->s_mds;
 	int op, issued;
 	u32 seq, mseq;
@@ -3153,11 +3162,23 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 		goto done_unlocked;
 
 	case CEPH_CAP_OP_IMPORT:
+		realm = NULL;
+		if (snaptrace_len) {
+			down_write(&mdsc->snap_rwsem);
+			ceph_update_snap_trace(mdsc, snaptrace,
+					       snaptrace + snaptrace_len,
+					       false, &realm);
+			downgrade_write(&mdsc->snap_rwsem);
+		} else {
+			down_read(&mdsc->snap_rwsem);
+		}
 		handle_cap_import(mdsc, inode, h, peer, session,
 				  &cap, &issued);
-		handle_cap_grant(mdsc, inode, h, snaptrace, snaptrace_len,
+		handle_cap_grant(mdsc, inode, h,
 				 inline_version, inline_data, inline_len,
 				 msg->middle, session, cap, issued);
+		if (realm)
+			ceph_put_snap_realm(mdsc, realm);
 		goto done_unlocked;
 	}
 
@@ -3177,7 +3198,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 	case CEPH_CAP_OP_GRANT:
 		__ceph_caps_issued(ci, &issued);
 		issued |= __ceph_caps_dirty(ci);
-		handle_cap_grant(mdsc, inode, h, NULL, 0,
+		handle_cap_grant(mdsc, inode, h,
 				 inline_version, inline_data, inline_len,
 				 msg->middle, session, cap, issued);
 		goto done_unlocked;
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index c241603764fd..83e9976f7189 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -26,8 +26,6 @@
  * point by name.
  */
 
-const struct inode_operations ceph_dir_iops;
-const struct file_operations ceph_dir_fops;
 const struct dentry_operations ceph_dentry_ops;
 
 /*
@@ -672,13 +670,17 @@ int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
 		/*
 		 * We created the item, then did a lookup, and found
 		 * it was already linked to another inode we already
-		 * had in our cache (and thus got spliced). Link our
-		 * dentry to that inode, but don't hash it, just in
-		 * case the VFS wants to dereference it.
+		 * had in our cache (and thus got spliced). To not
+		 * confuse VFS (especially when inode is a directory),
+		 * we don't link our dentry to that inode, return an
+		 * error instead.
+		 *
+		 * This event should be rare and it happens only when
+		 * we talk to old MDS. Recent MDS does not send traceless
+		 * reply for request that creates new inode.
 		 */
-		BUG_ON(!result->d_inode);
-		d_instantiate(dentry, result->d_inode);
-		return 0;
+		d_drop(result);
+		return -ESTALE;
 	}
 	return PTR_ERR(result);
 }
@@ -902,7 +904,7 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
 	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
 		dout("unlink/rmdir dir %p dn %p inode %p\n",
 		     dir, dentry, inode);
-		op = S_ISDIR(dentry->d_inode->i_mode) ?
+		op = d_is_dir(dentry) ?
 			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
 	} else
 		goto out;
@@ -1335,6 +1337,13 @@ const struct file_operations ceph_dir_fops = {
 	.fsync = ceph_dir_fsync,
 };
 
+const struct file_operations ceph_snapdir_fops = {
+	.iterate = ceph_readdir,
+	.llseek = ceph_dir_llseek,
+	.open = ceph_open,
+	.release = ceph_release,
+};
+
 const struct inode_operations ceph_dir_iops = {
 	.lookup = ceph_lookup,
 	.permission = ceph_permission,
@@ -1357,6 +1366,14 @@ const struct inode_operations ceph_dir_iops = {
 	.atomic_open = ceph_atomic_open,
 };
 
+const struct inode_operations ceph_snapdir_iops = {
+	.lookup = ceph_lookup,
+	.permission = ceph_permission,
+	.getattr = ceph_getattr,
+	.mkdir = ceph_mkdir,
+	.rmdir = ceph_unlink,
+};
+
 const struct dentry_operations ceph_dentry_ops = {
 	.d_revalidate = ceph_d_revalidate,
 	.d_release = ceph_d_release,
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 905986dd4c3c..d533075a823d 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -275,10 +275,10 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
 	err = ceph_mdsc_do_request(mdsc,
 				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
 				   req);
+	err = ceph_handle_snapdir(req, dentry, err);
 	if (err)
 		goto out_req;
 
-	err = ceph_handle_snapdir(req, dentry, err);
 	if (err == 0 && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
 		err = ceph_handle_notrace_create(dir, dentry);
 
@@ -292,7 +292,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
 	}
 	if (err)
 		goto out_req;
-	if (dn || dentry->d_inode == NULL || S_ISLNK(dentry->d_inode->i_mode)) {
+	if (dn || dentry->d_inode == NULL || d_is_symlink(dentry)) {
 		/* make vfs retry on splice, ENOENT, or symlink */
 		dout("atomic_open finish_no_open on dn %p\n", dn);
 		err = finish_no_open(file, dn);
@@ -392,13 +392,14 @@ more:
 	if (ret >= 0) {
 		int didpages;
 		if (was_short && (pos + ret < inode->i_size)) {
-			u64 tmp = min(this_len - ret,
-				      inode->i_size - pos - ret);
+			int zlen = min(this_len - ret,
+				       inode->i_size - pos - ret);
+			int zoff = (o_direct ? buf_align : io_align) +
+				    read + ret;
 			dout(" zero gap %llu to %llu\n",
-			     pos + ret, pos + ret + tmp);
-			ceph_zero_page_vector_range(page_align + read + ret,
-						    tmp, pages);
-			ret += tmp;
+			     pos + ret, pos + ret + zlen);
+			ceph_zero_page_vector_range(zoff, zlen, pages);
+			ret += zlen;
 		}
 
 		didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
@@ -878,28 +879,34 @@ again:
 
 	i_size = i_size_read(inode);
 	if (retry_op == READ_INLINE) {
-		/* does not support inline data > PAGE_SIZE */
-		if (i_size > PAGE_CACHE_SIZE) {
-			ret = -EIO;
-		} else if (iocb->ki_pos < i_size) {
+		BUG_ON(ret > 0 || read > 0);
+		if (iocb->ki_pos < i_size &&
+		    iocb->ki_pos < PAGE_CACHE_SIZE) {
 			loff_t end = min_t(loff_t, i_size,
 					   iocb->ki_pos + len);
+			end = min_t(loff_t, end, PAGE_CACHE_SIZE);
 			if (statret < end)
 				zero_user_segment(page, statret, end);
 			ret = copy_page_to_iter(page,
 				iocb->ki_pos & ~PAGE_MASK,
 				end - iocb->ki_pos, to);
 			iocb->ki_pos += ret;
-		} else {
-			ret = 0;
+			read += ret;
+		}
+		if (iocb->ki_pos < i_size && read < len) {
+			size_t zlen = min_t(size_t, len - read,
+					    i_size - iocb->ki_pos);
+			ret = iov_iter_zero(zlen, to);
+			iocb->ki_pos += ret;
+			read += ret;
 		}
 		__free_pages(page, 0);
-		return ret;
+		return read;
 	}
 
 	/* hit EOF or hole? */
 	if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
 	    ret < len) {
 		dout("sync_read hit hole, ppos %lld < size %lld"
 		     ", reading more\n", iocb->ki_pos,
 		     inode->i_size);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 6b5173605154..119c43c80638 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -82,8 +82,8 @@ struct inode *ceph_get_snapdir(struct inode *parent)
 	inode->i_mode = parent->i_mode;
 	inode->i_uid = parent->i_uid;
 	inode->i_gid = parent->i_gid;
-	inode->i_op = &ceph_dir_iops;
-	inode->i_fop = &ceph_dir_fops;
+	inode->i_op = &ceph_snapdir_iops;
+	inode->i_fop = &ceph_snapdir_fops;
 	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
 	ci->i_rbytes = 0;
 	return inode;
@@ -838,30 +838,31 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 			ceph_vinop(inode), inode->i_mode);
 	}
 
-	/* set dir completion flag? */
-	if (S_ISDIR(inode->i_mode) &&
-	    ci->i_files == 0 && ci->i_subdirs == 0 &&
-	    ceph_snap(inode) == CEPH_NOSNAP &&
-	    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
-	    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
-	    !__ceph_dir_is_complete(ci)) {
-		dout(" marking %p complete (empty)\n", inode);
-		__ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count),
-					ci->i_ordered_count);
-	}
-
 	/* were we issued a capability? */
 	if (info->cap.caps) {
 		if (ceph_snap(inode) == CEPH_NOSNAP) {
+			unsigned caps = le32_to_cpu(info->cap.caps);
 			ceph_add_cap(inode, session,
 				     le64_to_cpu(info->cap.cap_id),
-				     cap_fmode,
-				     le32_to_cpu(info->cap.caps),
+				     cap_fmode, caps,
 				     le32_to_cpu(info->cap.wanted),
 				     le32_to_cpu(info->cap.seq),
 				     le32_to_cpu(info->cap.mseq),
 				     le64_to_cpu(info->cap.realm),
 				     info->cap.flags, &new_cap);
+
+			/* set dir completion flag? */
+			if (S_ISDIR(inode->i_mode) &&
+			    ci->i_files == 0 && ci->i_subdirs == 0 &&
+			    (caps & CEPH_CAP_FILE_SHARED) &&
+			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
+			    !__ceph_dir_is_complete(ci)) {
+				dout(" marking %p complete (empty)\n", inode);
+				__ceph_dir_set_complete(ci,
+					atomic_read(&ci->i_release_count),
+					ci->i_ordered_count);
+			}
+
 			wake = true;
 		} else {
 			dout(" %p got snap_caps %s\n", inode,
@@ -1446,12 +1447,14 @@ retry_lookup:
 		}
 
 		if (!dn->d_inode) {
-			dn = splice_dentry(dn, in, NULL);
-			if (IS_ERR(dn)) {
-				err = PTR_ERR(dn);
+			struct dentry *realdn = splice_dentry(dn, in, NULL);
+			if (IS_ERR(realdn)) {
+				err = PTR_ERR(realdn);
+				d_drop(dn);
 				dn = NULL;
 				goto next_item;
 			}
+			dn = realdn;
 		}
 
 		di = dn->d_fsdata;
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 06ea5cd05cd9..4347039ecc18 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -245,6 +245,7 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
  */
 void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
 {
+	struct file_lock *lock;
 	struct file_lock_context *ctx;
 
 	*fcntl_count = 0;
@@ -252,8 +253,12 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
 
 	ctx = inode->i_flctx;
 	if (ctx) {
-		*fcntl_count = ctx->flc_posix_cnt;
-		*flock_count = ctx->flc_flock_cnt;
+		spin_lock(&ctx->flc_lock);
+		list_for_each_entry(lock, &ctx->flc_posix, fl_list)
+			++(*fcntl_count);
+		list_for_each_entry(lock, &ctx->flc_flock, fl_list)
+			++(*flock_count);
+		spin_unlock(&ctx->flc_lock);
 	}
 	dout("counted %d flock locks and %d fcntl locks",
 	     *flock_count, *fcntl_count);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 5f62fb7a5d0a..71c073f38e54 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -480,6 +480,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
 		mdsc->max_sessions = newmax;
 	}
 	mdsc->sessions[mds] = s;
+	atomic_inc(&mdsc->num_sessions);
 	atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */
 
 	ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
@@ -503,6 +504,7 @@ static void __unregister_session(struct ceph_mds_client *mdsc,
 	mdsc->sessions[s->s_mds] = NULL;
 	ceph_con_close(&s->s_con);
 	ceph_put_mds_session(s);
+	atomic_dec(&mdsc->num_sessions);
 }
 
 /*
@@ -842,8 +844,9 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
 	struct ceph_options *opt = mdsc->fsc->client->options;
 	void *p;
 
-	const char* metadata[3][2] = {
+	const char* metadata[][2] = {
 		{"hostname", utsname()->nodename},
+		{"kernel_version", utsname()->release},
 		{"entity_id", opt->name ? opt->name : ""},
 		{NULL, NULL}
 	};
@@ -1464,19 +1467,33 @@ out_unlocked:
 	return err;
 }
 
+static int check_cap_flush(struct inode *inode, u64 want_flush_seq)
+{
+	struct ceph_inode_info *ci = ceph_inode(inode);
+	int ret;
+	spin_lock(&ci->i_ceph_lock);
+	if (ci->i_flushing_caps)
+		ret = ci->i_cap_flush_seq >= want_flush_seq;
+	else
+		ret = 1;
+	spin_unlock(&ci->i_ceph_lock);
+	return ret;
+}
+
 /*
  * flush all dirty inode data to disk.
  *
  * returns true if we've flushed through want_flush_seq
  */
-static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
+static void wait_caps_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
 {
-	int mds, ret = 1;
+	int mds;
 
 	dout("check_cap_flush want %lld\n", want_flush_seq);
 	mutex_lock(&mdsc->mutex);
-	for (mds = 0; ret && mds < mdsc->max_sessions; mds++) {
+	for (mds = 0; mds < mdsc->max_sessions; mds++) {
 		struct ceph_mds_session *session = mdsc->sessions[mds];
+		struct inode *inode = NULL;
 
 		if (!session)
 			continue;
@@ -1489,29 +1506,29 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
 			list_entry(session->s_cap_flushing.next,
 				   struct ceph_inode_info,
 				   i_flushing_item);
-			struct inode *inode = &ci->vfs_inode;
 
-			spin_lock(&ci->i_ceph_lock);
-			if (ci->i_cap_flush_seq <= want_flush_seq) {
+			if (!check_cap_flush(&ci->vfs_inode, want_flush_seq)) {
 				dout("check_cap_flush still flushing %p "
-				     "seq %lld <= %lld to mds%d\n", inode,
-				     ci->i_cap_flush_seq, want_flush_seq,
-				     session->s_mds);
-				ret = 0;
+				     "seq %lld <= %lld to mds%d\n",
+				     &ci->vfs_inode, ci->i_cap_flush_seq,
+				     want_flush_seq, session->s_mds);
+				inode = igrab(&ci->vfs_inode);
 			}
-			spin_unlock(&ci->i_ceph_lock);
 		}
 		mutex_unlock(&session->s_mutex);
 		ceph_put_mds_session(session);
 
-		if (!ret)
-			return ret;
+		if (inode) {
+			wait_event(mdsc->cap_flushing_wq,
+				   check_cap_flush(inode, want_flush_seq));
+			iput(inode);
+		}
+
 		mutex_lock(&mdsc->mutex);
 	}
 
 	mutex_unlock(&mdsc->mutex);
 	dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq);
-	return ret;
 }
 
 /*
@@ -1923,7 +1940,11 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
 	head->num_releases = cpu_to_le16(releases);
 
 	/* time stamp */
-	ceph_encode_copy(&p, &req->r_stamp, sizeof(req->r_stamp));
+	{
+		struct ceph_timespec ts;
+		ceph_encode_timespec(&ts, &req->r_stamp);
+		ceph_encode_copy(&p, &ts, sizeof(ts));
+	}
 
 	BUG_ON(p > end);
 	msg->front.iov_len = p - msg->front.iov_base;
@@ -2012,7 +2033,11 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
 
 	/* time stamp */
 	p = msg->front.iov_base + req->r_request_release_offset;
-	ceph_encode_copy(&p, &req->r_stamp, sizeof(req->r_stamp));
+	{
+		struct ceph_timespec ts;
+		ceph_encode_timespec(&ts, &req->r_stamp);
+		ceph_encode_copy(&p, &ts, sizeof(ts));
+	}
 
 	msg->front.iov_len = p - msg->front.iov_base;
 	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
@@ -2159,6 +2184,8 @@ static void kick_requests(struct ceph_mds_client *mdsc, int mds)
 		p = rb_next(p);
 		if (req->r_got_unsafe)
 			continue;
+		if (req->r_attempts > 0)
+			continue; /* only new requests */
 		if (req->r_session &&
 		    req->r_session->s_mds == mds) {
 			dout(" kicking tid %llu\n", req->r_tid);
@@ -2286,6 +2313,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
 	struct ceph_mds_request *req;
 	struct ceph_mds_reply_head *head = msg->front.iov_base;
 	struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
+	struct ceph_snap_realm *realm;
 	u64 tid;
 	int err, result;
 	int mds = session->s_mds;
@@ -2401,11 +2429,13 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
 	}
 
 	/* snap trace */
+	realm = NULL;
 	if (rinfo->snapblob_len) {
 		down_write(&mdsc->snap_rwsem);
 		ceph_update_snap_trace(mdsc, rinfo->snapblob,
 			       rinfo->snapblob + rinfo->snapblob_len,
-			       le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP);
+			       le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
+			       &realm);
 		downgrade_write(&mdsc->snap_rwsem);
 	} else {
 		down_read(&mdsc->snap_rwsem);
@@ -2423,6 +2453,8 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
 	mutex_unlock(&req->r_fill_mutex);
 
 	up_read(&mdsc->snap_rwsem);
+	if (realm)
+		ceph_put_snap_realm(mdsc, realm);
 out_err:
 	mutex_lock(&mdsc->mutex);
 	if (!req->r_aborted) {
@@ -2487,6 +2519,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
 	dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
 	BUG_ON(req->r_err);
 	BUG_ON(req->r_got_result);
+	req->r_attempts = 0;
 	req->r_num_fwd = fwd_seq;
 	req->r_resend_mds = next_mds;
 	put_request_session(req);
@@ -2580,6 +2613,14 @@ static void handle_session(struct ceph_mds_session *session,
 		send_flushmsg_ack(mdsc, session, seq);
 		break;
 
+	case CEPH_SESSION_FORCE_RO:
+		dout("force_session_readonly %p\n", session);
+		spin_lock(&session->s_cap_lock);
+		session->s_readonly = true;
+		spin_unlock(&session->s_cap_lock);
+		wake_up_session_caps(session, 0);
+		break;
+
 	default:
 		pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
 		WARN_ON(1);
@@ -2610,6 +2651,7 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
 				    struct ceph_mds_session *session)
 {
 	struct ceph_mds_request *req, *nreq;
+	struct rb_node *p;
 	int err;
 
 	dout("replay_unsafe_requests mds%d\n", session->s_mds);
@@ -2622,6 +2664,28 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
 			ceph_con_send(&session->s_con, req->r_request);
 		}
 	}
+
+	/*
+	 * also re-send old requests when MDS enters reconnect stage. So that MDS
+	 * can process completed request in clientreplay stage.
+	 */
+	p = rb_first(&mdsc->request_tree);
+	while (p) {
+		req = rb_entry(p, struct ceph_mds_request, r_node);
+		p = rb_next(p);
+		if (req->r_got_unsafe)
+			continue;
+		if (req->r_attempts == 0)
+			continue; /* only old requests */
+		if (req->r_session &&
+		    req->r_session->s_mds == session->s_mds) {
+			err = __prepare_send_request(mdsc, req, session->s_mds);
+			if (!err) {
+				ceph_msg_get(req->r_request);
+				ceph_con_send(&session->s_con, req->r_request);
+			}
+		}
+	}
 	mutex_unlock(&mdsc->mutex);
 }
 
@@ -2787,6 +2851,8 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
 	spin_unlock(&session->s_gen_ttl_lock);
 
 	spin_lock(&session->s_cap_lock);
+	/* don't know if session is readonly */
+	session->s_readonly = 0;
 	/*
 	 * notify __ceph_remove_cap() that we are composing cap reconnect.
 	 * If a cap get released before being added to the cap reconnect,
@@ -2933,9 +2999,6 @@ static void check_new_map(struct ceph_mds_client *mdsc,
 				mutex_unlock(&s->s_mutex);
 				s->s_state = CEPH_MDS_SESSION_RESTARTING;
 			}
-
-			/* kick any requests waiting on the recovering mds */
-			kick_requests(mdsc, i);
 		} else if (oldstate == newstate) {
 			continue;  /* nothing new with this mds */
 		}
@@ -3295,6 +3358,7 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
 	init_waitqueue_head(&mdsc->session_close_wq);
 	INIT_LIST_HEAD(&mdsc->waiting_for_map);
 	mdsc->sessions = NULL;
+	atomic_set(&mdsc->num_sessions, 0);
 	mdsc->max_sessions = 0;
 	mdsc->stopping = 0;
 	init_rwsem(&mdsc->snap_rwsem);
@@ -3428,14 +3492,17 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
 	dout("sync\n");
 	mutex_lock(&mdsc->mutex);
 	want_tid = mdsc->last_tid;
-	want_flush = mdsc->cap_flush_seq;
 	mutex_unlock(&mdsc->mutex);
-	dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
 
 	ceph_flush_dirty_caps(mdsc);
+	spin_lock(&mdsc->cap_dirty_lock);
+	want_flush = mdsc->cap_flush_seq;
+	spin_unlock(&mdsc->cap_dirty_lock);
+
+	dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
 
 	wait_unsafe_requests(mdsc, want_tid);
-	wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
+	wait_caps_flush(mdsc, want_flush);
 }
 
 /*
@@ -3443,17 +3510,9 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
  */
 static bool done_closing_sessions(struct ceph_mds_client *mdsc)
 {
-	int i, n = 0;
-
 	if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
 		return true;
-
-	mutex_lock(&mdsc->mutex);
-	for (i = 0; i < mdsc->max_sessions; i++)
-		if (mdsc->sessions[i])
-			n++;
-	mutex_unlock(&mdsc->mutex);
-	return n == 0;
+	return atomic_read(&mdsc->num_sessions) == 0;
 }
 
 /*
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index e2817d00f7d9..1875b5d985c6 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -137,6 +137,7 @@ struct ceph_mds_session {
 	int               s_nr_caps, s_trim_caps;
 	int               s_num_cap_releases;
 	int		  s_cap_reconnect;
+	int		  s_readonly;
 	struct list_head  s_cap_releases; /* waiting cap_release messages */
 	struct list_head  s_cap_releases_done; /* ready to send */
 	struct ceph_cap  *s_cap_iterator;
@@ -272,6 +273,7 @@ struct ceph_mds_client {
 	struct list_head        waiting_for_map;
 
 	struct ceph_mds_session **sessions;    /* NULL for mds if no session */
+	atomic_t		num_sessions;
 	int                     max_sessions;  /* len of s_mds_sessions */
 	int                     stopping;      /* true if shutting down */
 
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index ce35fbd4ba5d..a97e39f09ba6 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -70,13 +70,11 @@ void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
 	 * safe.  we do need to protect against concurrent empty list
 	 * additions, however.
 	 */
-	if (atomic_read(&realm->nref) == 0) {
+	if (atomic_inc_return(&realm->nref) == 1) {
 		spin_lock(&mdsc->snap_empty_lock);
 		list_del_init(&realm->empty_item);
 		spin_unlock(&mdsc->snap_empty_lock);
 	}
-
-	atomic_inc(&realm->nref);
 }
 
 static void __insert_snap_realm(struct rb_root *root,
@@ -116,7 +114,7 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
 	if (!realm)
 		return ERR_PTR(-ENOMEM);
 
-	atomic_set(&realm->nref, 0);    /* tree does not take a ref */
+	atomic_set(&realm->nref, 1);    /* for caller */
 	realm->ino = ino;
 	INIT_LIST_HEAD(&realm->children);
 	INIT_LIST_HEAD(&realm->child_item);
@@ -134,8 +132,8 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
  *
  * caller must hold snap_rwsem for write.
  */
-struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
-					       u64 ino)
+static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc,
+						   u64 ino)
 {
 	struct rb_node *n = mdsc->snap_realms.rb_node;
 	struct ceph_snap_realm *r;
@@ -154,6 +152,16 @@ struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
 	return NULL;
 }
 
+struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
+					       u64 ino)
+{
+	struct ceph_snap_realm *r;
+	r = __lookup_snap_realm(mdsc, ino);
+	if (r)
+		ceph_get_snap_realm(mdsc, r);
+	return r;
+}
+
 static void __put_snap_realm(struct ceph_mds_client *mdsc,
 			     struct ceph_snap_realm *realm);
 
@@ -273,7 +281,6 @@ static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
 	}
 	realm->parent_ino = parentino;
 	realm->parent = parent;
-	ceph_get_snap_realm(mdsc, parent);
 	list_add(&realm->child_item, &parent->children);
 	return 1;
 }
@@ -631,12 +638,14 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
  * Caller must hold snap_rwsem for write.
  */
 int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
-			   void *p, void *e, bool deletion)
+			   void *p, void *e, bool deletion,
+			   struct ceph_snap_realm **realm_ret)
 {
 	struct ceph_mds_snap_realm *ri;    /* encoded */
 	__le64 *snaps;                     /* encoded */
 	__le64 *prior_parent_snaps;        /* encoded */
-	struct ceph_snap_realm *realm;
+	struct ceph_snap_realm *realm = NULL;
+	struct ceph_snap_realm *first_realm = NULL;
 	int invalidate = 0;
 	int err = -ENOMEM;
 	LIST_HEAD(dirty_realms);
@@ -704,13 +713,18 @@ more:
 	dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
 	     realm, invalidate, p, e);
 
-	if (p < e)
-		goto more;
-
 	/* invalidate when we reach the _end_ (root) of the trace */
-	if (invalidate)
+	if (invalidate && p >= e)
 		rebuild_snap_realms(realm);
 
+	if (!first_realm)
+		first_realm = realm;
+	else
+		ceph_put_snap_realm(mdsc, realm);
+
+	if (p < e)
+		goto more;
+
 	/*
 	 * queue cap snaps _after_ we've built the new snap contexts,
 	 * so that i_head_snapc can be set appropriately.
@@ -721,12 +735,21 @@ more:
 		queue_realm_cap_snaps(realm);
 	}
 
+	if (realm_ret)
+		*realm_ret = first_realm;
+	else
+		ceph_put_snap_realm(mdsc, first_realm);
+
 	__cleanup_empty_realms(mdsc);
 	return 0;
 
 bad:
 	err = -EINVAL;
 fail:
+	if (realm && !IS_ERR(realm))
+		ceph_put_snap_realm(mdsc, realm);
+	if (first_realm)
+		ceph_put_snap_realm(mdsc, first_realm);
 	pr_err("update_snap_trace error %d\n", err);
 	return err;
 }
@@ -844,7 +867,6 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
 		if (IS_ERR(realm))
 			goto out;
 	}
-	ceph_get_snap_realm(mdsc, realm);
 
 	dout("splitting snap_realm %llx %p\n", realm->ino, realm);
 	for (i = 0; i < num_split_inos; i++) {
@@ -905,7 +927,7 @@ skip_inode:
 	/* we may have taken some of the old realm's children. */
 	for (i = 0; i < num_split_realms; i++) {
 		struct ceph_snap_realm *child =
-			ceph_lookup_snap_realm(mdsc,
+			__lookup_snap_realm(mdsc,
 					   le64_to_cpu(split_realms[i]));
 		if (!child)
 			continue;
@@ -918,7 +940,7 @@ skip_inode:
 	 * snap, we can avoid queueing cap_snaps.
 	 */
 	ceph_update_snap_trace(mdsc, p, e,
-			       op == CEPH_SNAP_OP_DESTROY);
+			       op == CEPH_SNAP_OP_DESTROY, NULL);
 
 	if (op == CEPH_SNAP_OP_SPLIT)
 		/* we took a reference when we created the realm, above */
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 5ae62587a71d..a63997b8bcff 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -414,6 +414,10 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
414 seq_puts(m, ",noshare"); 414 seq_puts(m, ",noshare");
415 if (opt->flags & CEPH_OPT_NOCRC) 415 if (opt->flags & CEPH_OPT_NOCRC)
416 seq_puts(m, ",nocrc"); 416 seq_puts(m, ",nocrc");
417 if (opt->flags & CEPH_OPT_NOMSGAUTH)
418 seq_puts(m, ",nocephx_require_signatures");
419 if ((opt->flags & CEPH_OPT_TCP_NODELAY) == 0)
420 seq_puts(m, ",notcp_nodelay");
417 421
418 if (opt->name) 422 if (opt->name)
419 seq_printf(m, ",name=%s", opt->name); 423 seq_printf(m, ",name=%s", opt->name);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index e1aa32d0759d..04c8124ed30e 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -693,7 +693,8 @@ extern void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
693extern void ceph_put_snap_realm(struct ceph_mds_client *mdsc, 693extern void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
694 struct ceph_snap_realm *realm); 694 struct ceph_snap_realm *realm);
695extern int ceph_update_snap_trace(struct ceph_mds_client *m, 695extern int ceph_update_snap_trace(struct ceph_mds_client *m,
696 void *p, void *e, bool deletion); 696 void *p, void *e, bool deletion,
697 struct ceph_snap_realm **realm_ret);
697extern void ceph_handle_snap(struct ceph_mds_client *mdsc, 698extern void ceph_handle_snap(struct ceph_mds_client *mdsc,
698 struct ceph_mds_session *session, 699 struct ceph_mds_session *session,
699 struct ceph_msg *msg); 700 struct ceph_msg *msg);
@@ -892,7 +893,9 @@ extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
892int ceph_uninline_data(struct file *filp, struct page *locked_page); 893int ceph_uninline_data(struct file *filp, struct page *locked_page);
893/* dir.c */ 894/* dir.c */
894extern const struct file_operations ceph_dir_fops; 895extern const struct file_operations ceph_dir_fops;
896extern const struct file_operations ceph_snapdir_fops;
895extern const struct inode_operations ceph_dir_iops; 897extern const struct inode_operations ceph_dir_iops;
898extern const struct inode_operations ceph_snapdir_iops;
896extern const struct dentry_operations ceph_dentry_ops, ceph_snap_dentry_ops, 899extern const struct dentry_operations ceph_dentry_ops, ceph_snap_dentry_ops,
897 ceph_snapdir_dentry_ops; 900 ceph_snapdir_dentry_ops;
898 901
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 8fe1f7a21b3e..a94b3e673182 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1129,7 +1129,7 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
1129 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 1129 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1130 struct file_lock *flock; 1130 struct file_lock *flock;
1131 struct file_lock_context *flctx = inode->i_flctx; 1131 struct file_lock_context *flctx = inode->i_flctx;
1132 unsigned int i; 1132 unsigned int count = 0, i;
1133 int rc = 0, xid, type; 1133 int rc = 0, xid, type;
1134 struct list_head locks_to_send, *el; 1134 struct list_head locks_to_send, *el;
1135 struct lock_to_push *lck, *tmp; 1135 struct lock_to_push *lck, *tmp;
@@ -1140,14 +1140,20 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
1140 if (!flctx) 1140 if (!flctx)
1141 goto out; 1141 goto out;
1142 1142
1143 spin_lock(&flctx->flc_lock);
1144 list_for_each(el, &flctx->flc_posix) {
1145 count++;
1146 }
1147 spin_unlock(&flctx->flc_lock);
1148
1143 INIT_LIST_HEAD(&locks_to_send); 1149 INIT_LIST_HEAD(&locks_to_send);
1144 1150
1145 /* 1151 /*
1146 * Allocating flc_posix_cnt locks is enough because no FL_POSIX locks 1152 * Allocating count locks is enough because no FL_POSIX locks can be
1147 * can be added to the list while we are holding cinode->lock_sem that 1153 * added to the list while we are holding cinode->lock_sem that
1148 * protects locking operations of this inode. 1154 * protects locking operations of this inode.
1149 */ 1155 */
1150 for (i = 0; i < flctx->flc_posix_cnt; i++) { 1156 for (i = 0; i < count; i++) {
1151 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL); 1157 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1152 if (!lck) { 1158 if (!lck) {
1153 rc = -ENOMEM; 1159 rc = -ENOMEM;
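
With flc_posix_cnt gone, cifs_push_posix_locks() counts the posix lock list itself under flc_lock and only then allocates, relying on lock_sem to keep the count stable afterwards. A minimal user-space model of count-under-lock-then-allocate, with a pthread mutex standing in for the spinlock (list and helper names are illustrative):

    #include <pthread.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
    };

    struct lock_ctx {
        pthread_mutex_t lock;       /* protects head */
        struct node *head;
    };

    /* Count entries under the lock, then allocate outside it, where
     * sleeping is allowed.  This is only safe when an outer lock (like
     * cinode->lock_sem in cifs) prevents new entries from appearing
     * between the count and the use of the buffer. */
    static void **snapshot_alloc(struct lock_ctx *c, size_t *out_n)
    {
        size_t n = 0;
        struct node *p;

        pthread_mutex_lock(&c->lock);
        for (p = c->head; p; p = p->next)
            n++;
        pthread_mutex_unlock(&c->lock);

        *out_n = n;
        return calloc(n, sizeof(void *));
    }
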
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 281ee011bb6a..60cb88c1dd2b 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -304,7 +304,7 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
304 (const char *) old_name, (const char *)new_name); 304 (const char *) old_name, (const char *)new_name);
305 if (!error) { 305 if (!error) {
306 if (new_dentry->d_inode) { 306 if (new_dentry->d_inode) {
307 if (S_ISDIR(new_dentry->d_inode->i_mode)) { 307 if (d_is_dir(new_dentry)) {
308 coda_dir_drop_nlink(old_dir); 308 coda_dir_drop_nlink(old_dir);
309 coda_dir_inc_nlink(new_dir); 309 coda_dir_inc_nlink(new_dir);
310 } 310 }
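
The coda_rename() change above is one of many conversions in this merge from S_ISDIR(dentry->d_inode->i_mode) to d_is_dir(dentry): the helper reads type bits cached in the dentry flags instead of dereferencing d_inode, which stays correct even for entries without a usable inode. A toy model of caching the type at instantiation time (flag values and names invented for the sketch, not the VFS ones):

    #include <stdbool.h>
    #include <sys/stat.h>

    #define TOY_TYPE_MASK 0x7
    #define TOY_MISS_TYPE 0x0
    #define TOY_DIR_TYPE  0x1
    #define TOY_REG_TYPE  0x2

    struct toy_inode { mode_t mode; };

    struct toy_dentry {
        unsigned flags;
        struct toy_inode *inode;    /* NULL for a negative entry */
    };

    /* Cache the file type in the flags when binding an inode. */
    static void toy_instantiate(struct toy_dentry *d, struct toy_inode *i)
    {
        unsigned type = TOY_MISS_TYPE;

        if (i)
            type = S_ISDIR(i->mode) ? TOY_DIR_TYPE : TOY_REG_TYPE;
        d->flags = (d->flags & ~TOY_TYPE_MASK) | type;
        d->inode = i;
    }

    /* Type checks no longer need to touch d->inode at all. */
    static bool toy_is_dir(const struct toy_dentry *d)
    {
        return (d->flags & TOY_TYPE_MASK) == TOY_DIR_TYPE;
    }
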
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index a315677e44d3..b65d1ef532d5 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -69,14 +69,13 @@ extern struct kmem_cache *configfs_dir_cachep;
69extern int configfs_is_root(struct config_item *item); 69extern int configfs_is_root(struct config_item *item);
70 70
71extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *, struct super_block *); 71extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *, struct super_block *);
72extern int configfs_create(struct dentry *, umode_t mode, int (*init)(struct inode *)); 72extern int configfs_create(struct dentry *, umode_t mode, void (*init)(struct inode *));
73 73
74extern int configfs_create_file(struct config_item *, const struct configfs_attribute *); 74extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
75extern int configfs_make_dirent(struct configfs_dirent *, 75extern int configfs_make_dirent(struct configfs_dirent *,
76 struct dentry *, void *, umode_t, int); 76 struct dentry *, void *, umode_t, int);
77extern int configfs_dirent_is_ready(struct configfs_dirent *); 77extern int configfs_dirent_is_ready(struct configfs_dirent *);
78 78
79extern int configfs_add_file(struct dentry *, const struct configfs_attribute *, int);
80extern void configfs_hash_and_remove(struct dentry * dir, const char * name); 79extern void configfs_hash_and_remove(struct dentry * dir, const char * name);
81 80
82extern const unsigned char * configfs_get_name(struct configfs_dirent *sd); 81extern const unsigned char * configfs_get_name(struct configfs_dirent *sd);
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index c9c298bd3058..cf0db005d2f5 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -240,60 +240,26 @@ int configfs_make_dirent(struct configfs_dirent * parent_sd,
240 return 0; 240 return 0;
241} 241}
242 242
243static int init_dir(struct inode * inode) 243static void init_dir(struct inode * inode)
244{ 244{
245 inode->i_op = &configfs_dir_inode_operations; 245 inode->i_op = &configfs_dir_inode_operations;
246 inode->i_fop = &configfs_dir_operations; 246 inode->i_fop = &configfs_dir_operations;
247 247
248 /* directory inodes start off with i_nlink == 2 (for "." entry) */ 248 /* directory inodes start off with i_nlink == 2 (for "." entry) */
249 inc_nlink(inode); 249 inc_nlink(inode);
250 return 0;
251} 250}
252 251
253static int configfs_init_file(struct inode * inode) 252static void configfs_init_file(struct inode * inode)
254{ 253{
255 inode->i_size = PAGE_SIZE; 254 inode->i_size = PAGE_SIZE;
256 inode->i_fop = &configfs_file_operations; 255 inode->i_fop = &configfs_file_operations;
257 return 0;
258} 256}
259 257
260static int init_symlink(struct inode * inode) 258static void init_symlink(struct inode * inode)
261{ 259{
262 inode->i_op = &configfs_symlink_inode_operations; 260 inode->i_op = &configfs_symlink_inode_operations;
263 return 0;
264}
265
266static int create_dir(struct config_item *k, struct dentry *d)
267{
268 int error;
269 umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
270 struct dentry *p = d->d_parent;
271
272 BUG_ON(!k);
273
274 error = configfs_dirent_exists(p->d_fsdata, d->d_name.name);
275 if (!error)
276 error = configfs_make_dirent(p->d_fsdata, d, k, mode,
277 CONFIGFS_DIR | CONFIGFS_USET_CREATING);
278 if (!error) {
279 configfs_set_dir_dirent_depth(p->d_fsdata, d->d_fsdata);
280 error = configfs_create(d, mode, init_dir);
281 if (!error) {
282 inc_nlink(p->d_inode);
283 } else {
284 struct configfs_dirent *sd = d->d_fsdata;
285 if (sd) {
286 spin_lock(&configfs_dirent_lock);
287 list_del_init(&sd->s_sibling);
288 spin_unlock(&configfs_dirent_lock);
289 configfs_put(sd);
290 }
291 }
292 }
293 return error;
294} 261}
295 262
296
297/** 263/**
298 * configfs_create_dir - create a directory for a config_item. 264 * configfs_create_dir - create a directory for a config_item.
299 * @item: config_item we're creating directory for. 265 * @item: config_item we're creating directory for.
@@ -303,11 +269,37 @@ static int create_dir(struct config_item *k, struct dentry *d)
303 * until it is validated by configfs_dir_set_ready() 269 * until it is validated by configfs_dir_set_ready()
304 */ 270 */
305 271
306static int configfs_create_dir(struct config_item * item, struct dentry *dentry) 272static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
307{ 273{
308 int error = create_dir(item, dentry); 274 int error;
309 if (!error) 275 umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
276 struct dentry *p = dentry->d_parent;
277
278 BUG_ON(!item);
279
280 error = configfs_dirent_exists(p->d_fsdata, dentry->d_name.name);
281 if (unlikely(error))
282 return error;
283
284 error = configfs_make_dirent(p->d_fsdata, dentry, item, mode,
285 CONFIGFS_DIR | CONFIGFS_USET_CREATING);
286 if (unlikely(error))
287 return error;
288
289 configfs_set_dir_dirent_depth(p->d_fsdata, dentry->d_fsdata);
290 error = configfs_create(dentry, mode, init_dir);
291 if (!error) {
292 inc_nlink(p->d_inode);
310 item->ci_dentry = dentry; 293 item->ci_dentry = dentry;
294 } else {
295 struct configfs_dirent *sd = dentry->d_fsdata;
296 if (sd) {
297 spin_lock(&configfs_dirent_lock);
298 list_del_init(&sd->s_sibling);
299 spin_unlock(&configfs_dirent_lock);
300 configfs_put(sd);
301 }
302 }
311 return error; 303 return error;
312} 304}
313 305
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index 1d1c41f1014d..56d2cdc9ae0a 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -313,21 +313,6 @@ const struct file_operations configfs_file_operations = {
313 .release = configfs_release, 313 .release = configfs_release,
314}; 314};
315 315
316
317int configfs_add_file(struct dentry * dir, const struct configfs_attribute * attr, int type)
318{
319 struct configfs_dirent * parent_sd = dir->d_fsdata;
320 umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;
321 int error = 0;
322
323 mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_NORMAL);
324 error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode, type);
325 mutex_unlock(&dir->d_inode->i_mutex);
326
327 return error;
328}
329
330
331/** 316/**
332 * configfs_create_file - create an attribute file for an item. 317 * configfs_create_file - create an attribute file for an item.
333 * @item: item we're creating for. 318 * @item: item we're creating for.
@@ -336,9 +321,16 @@ int configfs_add_file(struct dentry * dir, const struct configfs_attribute * att
336 321
337int configfs_create_file(struct config_item * item, const struct configfs_attribute * attr) 322int configfs_create_file(struct config_item * item, const struct configfs_attribute * attr)
338{ 323{
339 BUG_ON(!item || !item->ci_dentry || !attr); 324 struct dentry *dir = item->ci_dentry;
325 struct configfs_dirent *parent_sd = dir->d_fsdata;
326 umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;
327 int error = 0;
340 328
341 return configfs_add_file(item->ci_dentry, attr, 329 mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_NORMAL);
342 CONFIGFS_ITEM_ATTR); 330 error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode,
331 CONFIGFS_ITEM_ATTR);
332 mutex_unlock(&dir->d_inode->i_mutex);
333
334 return error;
343} 335}
344 336
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 65af86147154..5423a6a6ecc8 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -176,7 +176,7 @@ static void configfs_set_inode_lock_class(struct configfs_dirent *sd,
176 176
177#endif /* CONFIG_LOCKDEP */ 177#endif /* CONFIG_LOCKDEP */
178 178
179int configfs_create(struct dentry * dentry, umode_t mode, int (*init)(struct inode *)) 179int configfs_create(struct dentry * dentry, umode_t mode, void (*init)(struct inode *))
180{ 180{
181 int error = 0; 181 int error = 0;
182 struct inode *inode = NULL; 182 struct inode *inode = NULL;
@@ -198,13 +198,7 @@ int configfs_create(struct dentry * dentry, umode_t mode, int (*init)(struct ino
198 p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME; 198 p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME;
199 configfs_set_inode_lock_class(sd, inode); 199 configfs_set_inode_lock_class(sd, inode);
200 200
201 if (init) { 201 init(inode);
202 error = init(inode);
203 if (error) {
204 iput(inode);
205 return error;
206 }
207 }
208 d_instantiate(dentry, inode); 202 d_instantiate(dentry, inode);
209 if (S_ISDIR(mode) || S_ISLNK(mode)) 203 if (S_ISDIR(mode) || S_ISLNK(mode))
210 dget(dentry); /* pin link and directory dentries in core */ 204 dget(dentry); /* pin link and directory dentries in core */
@@ -242,7 +236,7 @@ void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent)
242 236
243 if (dentry) { 237 if (dentry) {
244 spin_lock(&dentry->d_lock); 238 spin_lock(&dentry->d_lock);
245 if (!(d_unhashed(dentry) && dentry->d_inode)) { 239 if (!d_unhashed(dentry) && dentry->d_inode) {
246 dget_dlock(dentry); 240 dget_dlock(dentry);
247 __d_drop(dentry); 241 __d_drop(dentry);
248 spin_unlock(&dentry->d_lock); 242 spin_unlock(&dentry->d_lock);
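
Across the configfs hunks, create_dir() and configfs_add_file() are folded into their only callers, and the inode init callbacks become void: init_dir(), configfs_init_file() and init_symlink() cannot fail, so configfs_create() keeps a single failure path (allocation) and calls init unconditionally. A compact user-space sketch of that simplified contract (names are illustrative):

    #include <stdlib.h>

    struct inode_stub {
        const char *ops;
        long size;
    };

    /* A callback that cannot fail returns void; the creator no longer
     * needs an unwind path for it. */
    typedef void (*init_fn)(struct inode_stub *);

    static void stub_init_dir(struct inode_stub *i)  { i->ops = "dir";  }
    static void stub_init_file(struct inode_stub *i) { i->size = 4096; }

    static int stub_create(init_fn init, struct inode_stub **out)
    {
        struct inode_stub *inode = calloc(1, sizeof(*inode));

        if (!inode)
            return -1;          /* allocation is the only failure left */
        init(inode);            /* nothing to check */
        *out = inode;
        return 0;
    }
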
diff --git a/fs/coredump.c b/fs/coredump.c
index b5c86ffd5033..f319926ddf8c 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -572,7 +572,7 @@ void do_coredump(const siginfo_t *siginfo)
572 * 572 *
573 * Normally core limits are irrelevant to pipes, since 573 * Normally core limits are irrelevant to pipes, since
574 * we're not writing to the file system, but we use 574 * we're not writing to the file system, but we use
575 * cprm.limit of 1 here as a speacial value, this is a 575 * cprm.limit of 1 here as a special value, this is a
576 * consistent way to catch recursive crashes. 576 * consistent way to catch recursive crashes.
577 * We can still crash if the core_pattern binary sets 577 * We can still crash if the core_pattern binary sets
578 * RLIM_CORE = !1, but it runs as root, and can do 578 * RLIM_CORE = !1, but it runs as root, and can do
diff --git a/fs/dcache.c b/fs/dcache.c
index dc400fd29f4d..c71e3732e53b 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1659,9 +1659,25 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1659} 1659}
1660EXPORT_SYMBOL(d_set_d_op); 1660EXPORT_SYMBOL(d_set_d_op);
1661 1661
1662
1663/*
1664 * d_set_fallthru - Mark a dentry as falling through to a lower layer
1665 * @dentry - The dentry to mark
1666 *
1667 * Mark a dentry as falling through to the lower layer (as set with
1668 * d_pin_lower()). This flag may be recorded on the medium.
1669 */
1670void d_set_fallthru(struct dentry *dentry)
1671{
1672 spin_lock(&dentry->d_lock);
1673 dentry->d_flags |= DCACHE_FALLTHRU;
1674 spin_unlock(&dentry->d_lock);
1675}
1676EXPORT_SYMBOL(d_set_fallthru);
1677
1662static unsigned d_flags_for_inode(struct inode *inode) 1678static unsigned d_flags_for_inode(struct inode *inode)
1663{ 1679{
1664 unsigned add_flags = DCACHE_FILE_TYPE; 1680 unsigned add_flags = DCACHE_REGULAR_TYPE;
1665 1681
1666 if (!inode) 1682 if (!inode)
1667 return DCACHE_MISS_TYPE; 1683 return DCACHE_MISS_TYPE;
@@ -1674,13 +1690,21 @@ static unsigned d_flags_for_inode(struct inode *inode)
1674 else 1690 else
1675 inode->i_opflags |= IOP_LOOKUP; 1691 inode->i_opflags |= IOP_LOOKUP;
1676 } 1692 }
1677 } else if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) { 1693 goto type_determined;
1678 if (unlikely(inode->i_op->follow_link)) 1694 }
1695
1696 if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1697 if (unlikely(inode->i_op->follow_link)) {
1679 add_flags = DCACHE_SYMLINK_TYPE; 1698 add_flags = DCACHE_SYMLINK_TYPE;
1680 else 1699 goto type_determined;
1681 inode->i_opflags |= IOP_NOFOLLOW; 1700 }
1701 inode->i_opflags |= IOP_NOFOLLOW;
1682 } 1702 }
1683 1703
1704 if (unlikely(!S_ISREG(inode->i_mode)))
1705 add_flags = DCACHE_SPECIAL_TYPE;
1706
1707type_determined:
1684 if (unlikely(IS_AUTOMOUNT(inode))) 1708 if (unlikely(IS_AUTOMOUNT(inode)))
1685 add_flags |= DCACHE_NEED_AUTOMOUNT; 1709 add_flags |= DCACHE_NEED_AUTOMOUNT;
1686 return add_flags; 1710 return add_flags;
@@ -1691,7 +1715,8 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1691 unsigned add_flags = d_flags_for_inode(inode); 1715 unsigned add_flags = d_flags_for_inode(inode);
1692 1716
1693 spin_lock(&dentry->d_lock); 1717 spin_lock(&dentry->d_lock);
1694 __d_set_type(dentry, add_flags); 1718 dentry->d_flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
1719 dentry->d_flags |= add_flags;
1695 if (inode) 1720 if (inode)
1696 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); 1721 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1697 dentry->d_inode = inode; 1722 dentry->d_inode = inode;
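
Besides the new DCACHE_REGULAR_TYPE/DCACHE_SPECIAL_TYPE classification, __d_instantiate() above now clears both the entry-type bits and DCACHE_FALLTHRU before OR-ing in the freshly computed flags, all under d_lock, so a reused dentry cannot carry a stale type. The read-modify-write-under-lock pattern in a small pthread sketch (mask names invented):

    #include <pthread.h>

    #define TYPE_MASK     0x07u
    #define FALLTHRU_FLAG 0x08u

    struct entry {
        pthread_mutex_t lock;   /* protects flags */
        unsigned flags;
    };

    /* Replace the bits we own atomically with respect to other users of
     * the flags word: clear them all, then set the new value, while
     * holding the lock. */
    static void entry_set_type(struct entry *e, unsigned new_type)
    {
        pthread_mutex_lock(&e->lock);
        e->flags &= ~(TYPE_MASK | FALLTHRU_FLAG);
        e->flags |= new_type & TYPE_MASK;
        pthread_mutex_unlock(&e->lock);
    }
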
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 45b18a5e225c..96400ab42d13 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -169,10 +169,19 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
169 return 0; 169 return 0;
170} 170}
171 171
172static void debugfs_evict_inode(struct inode *inode)
173{
174 truncate_inode_pages_final(&inode->i_data);
175 clear_inode(inode);
176 if (S_ISLNK(inode->i_mode))
177 kfree(inode->i_private);
178}
179
172static const struct super_operations debugfs_super_operations = { 180static const struct super_operations debugfs_super_operations = {
173 .statfs = simple_statfs, 181 .statfs = simple_statfs,
174 .remount_fs = debugfs_remount, 182 .remount_fs = debugfs_remount,
175 .show_options = debugfs_show_options, 183 .show_options = debugfs_show_options,
184 .evict_inode = debugfs_evict_inode,
176}; 185};
177 186
178static struct vfsmount *debugfs_automount(struct path *path) 187static struct vfsmount *debugfs_automount(struct path *path)
@@ -511,23 +520,14 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
511 int ret = 0; 520 int ret = 0;
512 521
513 if (debugfs_positive(dentry)) { 522 if (debugfs_positive(dentry)) {
514 if (dentry->d_inode) { 523 dget(dentry);
515 dget(dentry); 524 if (S_ISDIR(dentry->d_inode->i_mode))
516 switch (dentry->d_inode->i_mode & S_IFMT) { 525 ret = simple_rmdir(parent->d_inode, dentry);
517 case S_IFDIR: 526 else
518 ret = simple_rmdir(parent->d_inode, dentry); 527 simple_unlink(parent->d_inode, dentry);
519 break; 528 if (!ret)
520 case S_IFLNK: 529 d_delete(dentry);
521 kfree(dentry->d_inode->i_private); 530 dput(dentry);
522 /* fall through */
523 default:
524 simple_unlink(parent->d_inode, dentry);
525 break;
526 }
527 if (!ret)
528 d_delete(dentry);
529 dput(dentry);
530 }
531 } 531 }
532 return ret; 532 return ret;
533} 533}
@@ -690,7 +690,7 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
690 } 690 }
691 d_move(old_dentry, dentry); 691 d_move(old_dentry, dentry);
692 fsnotify_move(old_dir->d_inode, new_dir->d_inode, old_name, 692 fsnotify_move(old_dir->d_inode, new_dir->d_inode, old_name,
693 S_ISDIR(old_dentry->d_inode->i_mode), 693 d_is_dir(old_dentry),
694 NULL, old_dentry); 694 NULL, old_dentry);
695 fsnotify_oldname_free(old_name); 695 fsnotify_oldname_free(old_name);
696 unlock_rename(new_dir, old_dir); 696 unlock_rename(new_dir, old_dir);
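
With debugfs_evict_inode() in place, the symlink body hung off i_private is freed when the inode is evicted rather than in __debugfs_remove(), which collapses to a plain rmdir/unlink choice. The general idea is to free per-object payload at the last reference drop, not at unlink time; a user-space analog (refcount and names illustrative):

    #include <stdlib.h>

    struct obj {
        int refcount;
        char *payload;          /* e.g. a symlink target string */
    };

    static void obj_put(struct obj *o)
    {
        if (--o->refcount)
            return;
        free(o->payload);       /* freed exactly once, at the last put */
        free(o);
    }

    /* "Unlink" drops only the name's reference; an open handle keeps
     * its own reference, so the payload stays valid until it closes. */
    static void obj_unlink(struct obj **slot)
    {
        struct obj *o = *slot;

        *slot = NULL;
        obj_put(o);
    }
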
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 6f4e659f508f..b07731e68c0b 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -230,7 +230,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
230 } 230 }
231 ecryptfs_set_file_lower( 231 ecryptfs_set_file_lower(
232 file, ecryptfs_inode_to_private(inode)->lower_file); 232 file, ecryptfs_inode_to_private(inode)->lower_file);
233 if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) { 233 if (d_is_dir(ecryptfs_dentry)) {
234 ecryptfs_printk(KERN_DEBUG, "This is a directory\n"); 234 ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
235 mutex_lock(&crypt_stat->cs_mutex); 235 mutex_lock(&crypt_stat->cs_mutex);
236 crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); 236 crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 34b36a504059..b08b5187f662 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -907,9 +907,9 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
907 lower_inode = ecryptfs_inode_to_lower(inode); 907 lower_inode = ecryptfs_inode_to_lower(inode);
908 lower_dentry = ecryptfs_dentry_to_lower(dentry); 908 lower_dentry = ecryptfs_dentry_to_lower(dentry);
909 mutex_lock(&crypt_stat->cs_mutex); 909 mutex_lock(&crypt_stat->cs_mutex);
910 if (S_ISDIR(dentry->d_inode->i_mode)) 910 if (d_is_dir(dentry))
911 crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); 911 crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
912 else if (S_ISREG(dentry->d_inode->i_mode) 912 else if (d_is_reg(dentry)
913 && (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED) 913 && (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)
914 || !(crypt_stat->flags & ECRYPTFS_KEY_VALID))) { 914 || !(crypt_stat->flags & ECRYPTFS_KEY_VALID))) {
915 struct ecryptfs_mount_crypt_stat *mount_crypt_stat; 915 struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index fdfd206c737a..714cd37a6ba3 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -429,7 +429,7 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
429 if (IS_ERR(result)) 429 if (IS_ERR(result))
430 return result; 430 return result;
431 431
432 if (S_ISDIR(result->d_inode->i_mode)) { 432 if (d_is_dir(result)) {
433 /* 433 /*
434 * This request is for a directory. 434 * This request is for a directory.
435 * 435 *
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 982d934fd9ac..f63c3d5805c4 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -364,7 +364,8 @@ struct flex_groups {
364#define EXT4_DIRTY_FL 0x00000100 364#define EXT4_DIRTY_FL 0x00000100
365#define EXT4_COMPRBLK_FL 0x00000200 /* One or more compressed clusters */ 365#define EXT4_COMPRBLK_FL 0x00000200 /* One or more compressed clusters */
366#define EXT4_NOCOMPR_FL 0x00000400 /* Don't compress */ 366#define EXT4_NOCOMPR_FL 0x00000400 /* Don't compress */
367#define EXT4_ECOMPR_FL 0x00000800 /* Compression error */ 367 /* nb: was previously EXT2_ECOMPR_FL */
368#define EXT4_ENCRYPT_FL 0x00000800 /* encrypted file */
368/* End compression flags --- maybe not all used */ 369/* End compression flags --- maybe not all used */
369#define EXT4_INDEX_FL 0x00001000 /* hash-indexed directory */ 370#define EXT4_INDEX_FL 0x00001000 /* hash-indexed directory */
370#define EXT4_IMAGIC_FL 0x00002000 /* AFS directory */ 371#define EXT4_IMAGIC_FL 0x00002000 /* AFS directory */
@@ -421,7 +422,7 @@ enum {
421 EXT4_INODE_DIRTY = 8, 422 EXT4_INODE_DIRTY = 8,
422 EXT4_INODE_COMPRBLK = 9, /* One or more compressed clusters */ 423 EXT4_INODE_COMPRBLK = 9, /* One or more compressed clusters */
423 EXT4_INODE_NOCOMPR = 10, /* Don't compress */ 424 EXT4_INODE_NOCOMPR = 10, /* Don't compress */
424 EXT4_INODE_ECOMPR = 11, /* Compression error */ 425 EXT4_INODE_ENCRYPT = 11, /* Encrypted file */
425/* End compression flags --- maybe not all used */ 426/* End compression flags --- maybe not all used */
426 EXT4_INODE_INDEX = 12, /* hash-indexed directory */ 427 EXT4_INODE_INDEX = 12, /* hash-indexed directory */
427 EXT4_INODE_IMAGIC = 13, /* AFS directory */ 428 EXT4_INODE_IMAGIC = 13, /* AFS directory */
@@ -466,7 +467,7 @@ static inline void ext4_check_flag_values(void)
466 CHECK_FLAG_VALUE(DIRTY); 467 CHECK_FLAG_VALUE(DIRTY);
467 CHECK_FLAG_VALUE(COMPRBLK); 468 CHECK_FLAG_VALUE(COMPRBLK);
468 CHECK_FLAG_VALUE(NOCOMPR); 469 CHECK_FLAG_VALUE(NOCOMPR);
469 CHECK_FLAG_VALUE(ECOMPR); 470 CHECK_FLAG_VALUE(ENCRYPT);
470 CHECK_FLAG_VALUE(INDEX); 471 CHECK_FLAG_VALUE(INDEX);
471 CHECK_FLAG_VALUE(IMAGIC); 472 CHECK_FLAG_VALUE(IMAGIC);
472 CHECK_FLAG_VALUE(JOURNAL_DATA); 473 CHECK_FLAG_VALUE(JOURNAL_DATA);
@@ -1048,6 +1049,12 @@ extern void ext4_set_bits(void *bm, int cur, int len);
1048/* Metadata checksum algorithm codes */ 1049/* Metadata checksum algorithm codes */
1049#define EXT4_CRC32C_CHKSUM 1 1050#define EXT4_CRC32C_CHKSUM 1
1050 1051
1052/* Encryption algorithms */
1053#define EXT4_ENCRYPTION_MODE_INVALID 0
1054#define EXT4_ENCRYPTION_MODE_AES_256_XTS 1
1055#define EXT4_ENCRYPTION_MODE_AES_256_GCM 2
1056#define EXT4_ENCRYPTION_MODE_AES_256_CBC 3
1057
1051/* 1058/*
1052 * Structure of the super block 1059 * Structure of the super block
1053 */ 1060 */
@@ -1161,7 +1168,8 @@ struct ext4_super_block {
1161 __le32 s_grp_quota_inum; /* inode for tracking group quota */ 1168 __le32 s_grp_quota_inum; /* inode for tracking group quota */
1162 __le32 s_overhead_clusters; /* overhead blocks/clusters in fs */ 1169 __le32 s_overhead_clusters; /* overhead blocks/clusters in fs */
1163 __le32 s_backup_bgs[2]; /* groups with sparse_super2 SBs */ 1170 __le32 s_backup_bgs[2]; /* groups with sparse_super2 SBs */
1164 __le32 s_reserved[106]; /* Padding to the end of the block */ 1171 __u8 s_encrypt_algos[4]; /* Encryption algorithms in use */
1172 __le32 s_reserved[105]; /* Padding to the end of the block */
1165 __le32 s_checksum; /* crc32c(superblock) */ 1173 __le32 s_checksum; /* crc32c(superblock) */
1166}; 1174};
1167 1175
@@ -1527,6 +1535,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
1527 * GDT_CSUM bits are mutually exclusive. 1535 * GDT_CSUM bits are mutually exclusive.
1528 */ 1536 */
1529#define EXT4_FEATURE_RO_COMPAT_METADATA_CSUM 0x0400 1537#define EXT4_FEATURE_RO_COMPAT_METADATA_CSUM 0x0400
1538#define EXT4_FEATURE_RO_COMPAT_READONLY 0x1000
1530 1539
1531#define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001 1540#define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001
1532#define EXT4_FEATURE_INCOMPAT_FILETYPE 0x0002 1541#define EXT4_FEATURE_INCOMPAT_FILETYPE 0x0002
@@ -1542,6 +1551,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
1542#define EXT4_FEATURE_INCOMPAT_BG_USE_META_CSUM 0x2000 /* use crc32c for bg */ 1551#define EXT4_FEATURE_INCOMPAT_BG_USE_META_CSUM 0x2000 /* use crc32c for bg */
1543#define EXT4_FEATURE_INCOMPAT_LARGEDIR 0x4000 /* >2GB or 3-lvl htree */ 1552#define EXT4_FEATURE_INCOMPAT_LARGEDIR 0x4000 /* >2GB or 3-lvl htree */
1544#define EXT4_FEATURE_INCOMPAT_INLINE_DATA 0x8000 /* data in inode */ 1553#define EXT4_FEATURE_INCOMPAT_INLINE_DATA 0x8000 /* data in inode */
1554#define EXT4_FEATURE_INCOMPAT_ENCRYPT 0x10000
1545 1555
1546#define EXT2_FEATURE_COMPAT_SUPP EXT4_FEATURE_COMPAT_EXT_ATTR 1556#define EXT2_FEATURE_COMPAT_SUPP EXT4_FEATURE_COMPAT_EXT_ATTR
1547#define EXT2_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \ 1557#define EXT2_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \
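
The ext4.h hunks reserve flag 0x0800 for EXT4_ENCRYPT_FL, carve s_encrypt_algos[4] out of s_reserved (shrinking it from 106 to 105 words), and add the RO_COMPAT_READONLY and INCOMPAT_ENCRYPT feature bits. Feature bits gate mount behaviour through plain mask tests; a minimal sketch of the checks the super.c hunks below build on (struct layout and values simplified, not the real ext4 ones):

    #include <stdbool.h>
    #include <stdint.h>

    #define RO_COMPAT_READONLY 0x1000u  /* may only be mounted read-only */

    struct sb_stub {
        uint32_t ro_compat;     /* unknown bits here force read-only */
        uint32_t incompat;      /* unknown bits here fail the mount */
    };

    static bool has_ro_compat(const struct sb_stub *s, uint32_t mask)
    {
        return (s->ro_compat & mask) != 0;
    }

    /* Mirror of the remount logic: refuse to go read-write when the
     * filesystem is flagged read-only at the feature level. */
    static int can_remount_rw(const struct sb_stub *s)
    {
        return has_ro_compat(s, RO_COMPAT_READONLY) ? -30 /* -EROFS */ : 0;
    }
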
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 6b9878a24182..45fe924f82bc 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -1401,10 +1401,7 @@ end_range:
1401 * to free. Everything was covered by the start 1401 * to free. Everything was covered by the start
1402 * of the range. 1402 * of the range.
1403 */ 1403 */
1404 return 0; 1404 goto do_indirects;
1405 } else {
1406 /* Shared branch grows from an indirect block */
1407 partial2--;
1408 } 1405 }
1409 } else { 1406 } else {
1410 /* 1407 /*
@@ -1435,56 +1432,96 @@ end_range:
1435 /* Punch happened within the same level (n == n2) */ 1432 /* Punch happened within the same level (n == n2) */
1436 partial = ext4_find_shared(inode, n, offsets, chain, &nr); 1433 partial = ext4_find_shared(inode, n, offsets, chain, &nr);
1437 partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2); 1434 partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
1438 /* 1435
1439 * ext4_find_shared returns Indirect structure which 1436 /* Free top, but only if partial2 isn't its subtree. */
1440 * points to the last element which should not be 1437 if (nr) {
1441 * removed by truncate. But this is end of the range 1438 int level = min(partial - chain, partial2 - chain2);
1442 * in punch_hole so we need to point to the next element 1439 int i;
1443 */ 1440 int subtree = 1;
1444 partial2->p++; 1441
1445 while ((partial > chain) || (partial2 > chain2)) { 1442 for (i = 0; i <= level; i++) {
1446 /* We're at the same block, so we're almost finished */ 1443 if (offsets[i] != offsets2[i]) {
1447 if ((partial->bh && partial2->bh) && 1444 subtree = 0;
1448 (partial->bh->b_blocknr == partial2->bh->b_blocknr)) { 1445 break;
1449 if ((partial > chain) && (partial2 > chain2)) { 1446 }
1447 }
1448
1449 if (!subtree) {
1450 if (partial == chain) {
1451 /* Shared branch grows from the inode */
1452 ext4_free_branches(handle, inode, NULL,
1453 &nr, &nr+1,
1454 (chain+n-1) - partial);
1455 *partial->p = 0;
1456 } else {
1457 /* Shared branch grows from an indirect block */
1458 BUFFER_TRACE(partial->bh, "get_write_access");
1450 ext4_free_branches(handle, inode, partial->bh, 1459 ext4_free_branches(handle, inode, partial->bh,
1451 partial->p + 1, 1460 partial->p,
1452 partial2->p, 1461 partial->p+1,
1453 (chain+n-1) - partial); 1462 (chain+n-1) - partial);
1454 BUFFER_TRACE(partial->bh, "call brelse");
1455 brelse(partial->bh);
1456 BUFFER_TRACE(partial2->bh, "call brelse");
1457 brelse(partial2->bh);
1458 } 1463 }
1459 return 0;
1460 } 1464 }
1465 }
1466
1467 if (!nr2) {
1461 /* 1468 /*
1462 * Clear the ends of indirect blocks on the shared branch 1469 * ext4_find_shared returns Indirect structure which
1463 * at the start of the range 1470 * points to the last element which should not be
1471 * removed by truncate. But this is end of the range
1472 * in punch_hole so we need to point to the next element
1464 */ 1473 */
1465 if (partial > chain) { 1474 partial2->p++;
1475 }
1476
1477 while (partial > chain || partial2 > chain2) {
1478 int depth = (chain+n-1) - partial;
1479 int depth2 = (chain2+n2-1) - partial2;
1480
1481 if (partial > chain && partial2 > chain2 &&
1482 partial->bh->b_blocknr == partial2->bh->b_blocknr) {
1483 /*
1484 * We've converged on the same block. Clear the range,
1485 * then we're done.
1486 */
1466 ext4_free_branches(handle, inode, partial->bh, 1487 ext4_free_branches(handle, inode, partial->bh,
1467 partial->p + 1, 1488 partial->p + 1,
1468 (__le32 *)partial->bh->b_data+addr_per_block, 1489 partial2->p,
1469 (chain+n-1) - partial); 1490 (chain+n-1) - partial);
1470 BUFFER_TRACE(partial->bh, "call brelse"); 1491 BUFFER_TRACE(partial->bh, "call brelse");
1471 brelse(partial->bh); 1492 brelse(partial->bh);
1472 partial--; 1493 BUFFER_TRACE(partial2->bh, "call brelse");
1494 brelse(partial2->bh);
1495 return 0;
1473 } 1496 }
1497
1474 /* 1498 /*
1475 * Clear the ends of indirect blocks on the shared branch 1499 * The start and end partial branches may not be at the same
1476 * at the end of the range 1500 * level even though the punch happened within one level. So, we
1501 * give them a chance to arrive at the same level, then walk
1502 * them in step with each other until we converge on the same
1503 * block.
1477 */ 1504 */
1478 if (partial2 > chain2) { 1505 if (partial > chain && depth <= depth2) {
1506 ext4_free_branches(handle, inode, partial->bh,
1507 partial->p + 1,
1508 (__le32 *)partial->bh->b_data+addr_per_block,
1509 (chain+n-1) - partial);
1510 BUFFER_TRACE(partial->bh, "call brelse");
1511 brelse(partial->bh);
1512 partial--;
1513 }
1514 if (partial2 > chain2 && depth2 <= depth) {
1479 ext4_free_branches(handle, inode, partial2->bh, 1515 ext4_free_branches(handle, inode, partial2->bh,
1480 (__le32 *)partial2->bh->b_data, 1516 (__le32 *)partial2->bh->b_data,
1481 partial2->p, 1517 partial2->p,
1482 (chain2+n-1) - partial2); 1518 (chain2+n2-1) - partial2);
1483 BUFFER_TRACE(partial2->bh, "call brelse"); 1519 BUFFER_TRACE(partial2->bh, "call brelse");
1484 brelse(partial2->bh); 1520 brelse(partial2->bh);
1485 partial2--; 1521 partial2--;
1486 } 1522 }
1487 } 1523 }
1524 return 0;
1488 1525
1489do_indirects: 1526do_indirects:
1490 /* Kill the remaining (whole) subtrees */ 1527 /* Kill the remaining (whole) subtrees */
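
The rewritten punch-hole tail above walks the start and end partial branches toward the root in lockstep, always trimming whichever side is at least as deep, until both cursors sit on the same indirect block; it also frees the shared top first unless one branch is a subtree of the other. A toy model of the lockstep convergence (plain arrays instead of Indirect chains, purely illustrative):

    #include <stdio.h>

    /* a and b are "chains" of block numbers, index 0 = root.  Trim from
     * the deep end of each until both point at the same block. */
    static void converge(const long *a, int na, const long *b, int nb)
    {
        int ia = na - 1, ib = nb - 1;

        while (ia >= 0 || ib >= 0) {
            if (ia >= 0 && ib >= 0 && a[ia] == b[ib]) {
                printf("converged on block %ld\n", a[ia]);
                return;         /* clear between the cursors, then done */
            }
            if (ia >= 0 && ia >= ib)    /* start side at least as deep */
                printf("trim start branch at %ld\n", a[ia--]);
            if (ib >= 0 && ib >= ia)    /* end side at least as deep */
                printf("trim end branch at %ld\n", b[ib--]);
        }
    }
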
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 85404f15e53a..5cb9a212b86f 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1024,6 +1024,7 @@ static int ext4_write_end(struct file *file,
1024{ 1024{
1025 handle_t *handle = ext4_journal_current_handle(); 1025 handle_t *handle = ext4_journal_current_handle();
1026 struct inode *inode = mapping->host; 1026 struct inode *inode = mapping->host;
1027 loff_t old_size = inode->i_size;
1027 int ret = 0, ret2; 1028 int ret = 0, ret2;
1028 int i_size_changed = 0; 1029 int i_size_changed = 0;
1029 1030
@@ -1054,6 +1055,8 @@ static int ext4_write_end(struct file *file,
1054 unlock_page(page); 1055 unlock_page(page);
1055 page_cache_release(page); 1056 page_cache_release(page);
1056 1057
1058 if (old_size < pos)
1059 pagecache_isize_extended(inode, old_size, pos);
1057 /* 1060 /*
1058 * Don't mark the inode dirty under page lock. First, it unnecessarily 1061 * Don't mark the inode dirty under page lock. First, it unnecessarily
1059 * makes the holding time of page lock longer. Second, it forces lock 1062 * makes the holding time of page lock longer. Second, it forces lock
@@ -1095,6 +1098,7 @@ static int ext4_journalled_write_end(struct file *file,
1095{ 1098{
1096 handle_t *handle = ext4_journal_current_handle(); 1099 handle_t *handle = ext4_journal_current_handle();
1097 struct inode *inode = mapping->host; 1100 struct inode *inode = mapping->host;
1101 loff_t old_size = inode->i_size;
1098 int ret = 0, ret2; 1102 int ret = 0, ret2;
1099 int partial = 0; 1103 int partial = 0;
1100 unsigned from, to; 1104 unsigned from, to;
@@ -1127,6 +1131,9 @@ static int ext4_journalled_write_end(struct file *file,
1127 unlock_page(page); 1131 unlock_page(page);
1128 page_cache_release(page); 1132 page_cache_release(page);
1129 1133
1134 if (old_size < pos)
1135 pagecache_isize_extended(inode, old_size, pos);
1136
1130 if (size_changed) { 1137 if (size_changed) {
1131 ret2 = ext4_mark_inode_dirty(handle, inode); 1138 ret2 = ext4_mark_inode_dirty(handle, inode);
1132 if (!ret) 1139 if (!ret)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 1adac6868e6f..e061e66c8280 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2779,6 +2779,12 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
2779 if (readonly) 2779 if (readonly)
2780 return 1; 2780 return 1;
2781 2781
2782 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_READONLY)) {
2783 ext4_msg(sb, KERN_INFO, "filesystem is read-only");
2784 sb->s_flags |= MS_RDONLY;
2785 return 1;
2786 }
2787
2782 /* Check that feature set is OK for a read-write mount */ 2788 /* Check that feature set is OK for a read-write mount */
2783 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP)) { 2789 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP)) {
2784 ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of " 2790 ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
@@ -3936,9 +3942,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3936 get_random_bytes(&sbi->s_next_generation, sizeof(u32)); 3942 get_random_bytes(&sbi->s_next_generation, sizeof(u32));
3937 spin_lock_init(&sbi->s_next_gen_lock); 3943 spin_lock_init(&sbi->s_next_gen_lock);
3938 3944
3939 init_timer(&sbi->s_err_report); 3945 setup_timer(&sbi->s_err_report, print_daily_error_info,
3940 sbi->s_err_report.function = print_daily_error_info; 3946 (unsigned long) sb);
3941 sbi->s_err_report.data = (unsigned long) sb;
3942 3947
3943 /* Register extent status tree shrinker */ 3948 /* Register extent status tree shrinker */
3944 if (ext4_es_register_shrinker(sbi)) 3949 if (ext4_es_register_shrinker(sbi))
@@ -4866,9 +4871,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
4866 if (sbi->s_journal && sbi->s_journal->j_task->io_context) 4871 if (sbi->s_journal && sbi->s_journal->j_task->io_context)
4867 journal_ioprio = sbi->s_journal->j_task->io_context->ioprio; 4872 journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
4868 4873
4869 /*
4870 * Allow the "check" option to be passed as a remount option.
4871 */
4872 if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) { 4874 if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
4873 err = -EINVAL; 4875 err = -EINVAL;
4874 goto restore_opts; 4876 goto restore_opts;
@@ -4877,17 +4879,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
4877 if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^ 4879 if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
4878 test_opt(sb, JOURNAL_CHECKSUM)) { 4880 test_opt(sb, JOURNAL_CHECKSUM)) {
4879 ext4_msg(sb, KERN_ERR, "changing journal_checksum " 4881 ext4_msg(sb, KERN_ERR, "changing journal_checksum "
4880 "during remount not supported"); 4882 "during remount not supported; ignoring");
4881 err = -EINVAL; 4883 sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
4882 goto restore_opts;
4883 }
4884
4885 if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
4886 test_opt(sb, JOURNAL_CHECKSUM)) {
4887 ext4_msg(sb, KERN_ERR, "changing journal_checksum "
4888 "during remount not supported");
4889 err = -EINVAL;
4890 goto restore_opts;
4891 } 4884 }
4892 4885
4893 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { 4886 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
@@ -4963,7 +4956,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
4963 ext4_mark_recovery_complete(sb, es); 4956 ext4_mark_recovery_complete(sb, es);
4964 } else { 4957 } else {
4965 /* Make sure we can mount this feature set readwrite */ 4958 /* Make sure we can mount this feature set readwrite */
4966 if (!ext4_feature_set_ok(sb, 0)) { 4959 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4960 EXT4_FEATURE_RO_COMPAT_READONLY) ||
4961 !ext4_feature_set_ok(sb, 0)) {
4967 err = -EROFS; 4962 err = -EROFS;
4968 goto restore_opts; 4963 goto restore_opts;
4969 } 4964 }
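
ext4_fill_super() above now uses setup_timer(), which folds init_timer() plus the .function and .data assignments into one call. A self-contained sketch of why such a helper exists (a toy timer struct, not the kernel's):

    struct toy_timer {
        void (*function)(unsigned long);
        unsigned long data;
    };

    static void toy_init_timer(struct toy_timer *t)
    {
        t->function = 0;
        t->data = 0;
    }

    /* One call instead of three statements at every init site, and no
     * way to forget one of the field assignments. */
    static void toy_setup_timer(struct toy_timer *t,
                                void (*fn)(unsigned long),
                                unsigned long data)
    {
        toy_init_timer(t);
        t->function = fn;
        t->data = data;
    }
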
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 073657f755d4..e907052eeadb 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -769,9 +769,9 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
769 struct inode *inode = wb_inode(wb->b_io.prev); 769 struct inode *inode = wb_inode(wb->b_io.prev);
770 struct super_block *sb = inode->i_sb; 770 struct super_block *sb = inode->i_sb;
771 771
772 if (!grab_super_passive(sb)) { 772 if (!trylock_super(sb)) {
773 /* 773 /*
774 * grab_super_passive() may fail consistently due to 774 * trylock_super() may fail consistently due to
775 * s_umount being grabbed by someone else. Don't use 775 * s_umount being grabbed by someone else. Don't use
776 * requeue_io() to avoid busy retrying the inode/sb. 776 * requeue_io() to avoid busy retrying the inode/sb.
777 */ 777 */
@@ -779,7 +779,7 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
779 continue; 779 continue;
780 } 780 }
781 wrote += writeback_sb_inodes(sb, wb, work); 781 wrote += writeback_sb_inodes(sb, wb, work);
782 drop_super(sb); 782 up_read(&sb->s_umount);
783 783
784 /* refer to the same tests at the end of writeback_sb_inodes */ 784 /* refer to the same tests at the end of writeback_sb_inodes */
785 if (wrote) { 785 if (wrote) {
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 08e7b1a9d5d0..1545b711ddcf 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -971,7 +971,7 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
971 err = -EBUSY; 971 err = -EBUSY;
972 goto badentry; 972 goto badentry;
973 } 973 }
974 if (S_ISDIR(entry->d_inode->i_mode)) { 974 if (d_is_dir(entry)) {
975 shrink_dcache_parent(entry); 975 shrink_dcache_parent(entry);
976 if (!simple_empty(entry)) { 976 if (!simple_empty(entry)) {
977 err = -ENOTEMPTY; 977 err = -ENOTEMPTY;
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 6371192961e2..487527b42d94 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1809,7 +1809,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
1809 gfs2_consist_inode(dip); 1809 gfs2_consist_inode(dip);
1810 dip->i_entries--; 1810 dip->i_entries--;
1811 dip->i_inode.i_mtime = dip->i_inode.i_ctime = tv; 1811 dip->i_inode.i_mtime = dip->i_inode.i_ctime = tv;
1812 if (S_ISDIR(dentry->d_inode->i_mode)) 1812 if (d_is_dir(dentry))
1813 drop_nlink(&dip->i_inode); 1813 drop_nlink(&dip->i_inode);
1814 mark_inode_dirty(&dip->i_inode); 1814 mark_inode_dirty(&dip->i_inode);
1815 1815
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 435bea231cc6..f0235c1640af 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -530,7 +530,7 @@ static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
530 530
531 /* Unlink destination if it already exists */ 531 /* Unlink destination if it already exists */
532 if (new_dentry->d_inode) { 532 if (new_dentry->d_inode) {
533 if (S_ISDIR(new_dentry->d_inode->i_mode)) 533 if (d_is_dir(new_dentry))
534 res = hfsplus_rmdir(new_dir, new_dentry); 534 res = hfsplus_rmdir(new_dir, new_dentry);
535 else 535 else
536 res = hfsplus_unlink(new_dir, new_dentry); 536 res = hfsplus_unlink(new_dir, new_dentry);
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index 5f2755117ce7..043ac9d77262 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -678,10 +678,10 @@ static struct inode *get_inode(struct super_block *sb, struct dentry *dentry)
678 return NULL; 678 return NULL;
679 } 679 }
680 680
681 if (S_ISDIR(dentry->d_inode->i_mode)) { 681 if (d_is_dir(dentry)) {
682 inode->i_op = &hppfs_dir_iops; 682 inode->i_op = &hppfs_dir_iops;
683 inode->i_fop = &hppfs_dir_fops; 683 inode->i_fop = &hppfs_dir_fops;
684 } else if (S_ISLNK(dentry->d_inode->i_mode)) { 684 } else if (d_is_symlink(dentry)) {
685 inode->i_op = &hppfs_link_iops; 685 inode->i_op = &hppfs_link_iops;
686 inode->i_fop = &hppfs_file_fops; 686 inode->i_fop = &hppfs_file_fops;
687 } else { 687 } else {
diff --git a/fs/internal.h b/fs/internal.h
index 30459dab409d..01dce1d1476b 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -84,7 +84,7 @@ extern struct file *get_empty_filp(void);
84 * super.c 84 * super.c
85 */ 85 */
86extern int do_remount_sb(struct super_block *, int, void *, int); 86extern int do_remount_sb(struct super_block *, int, void *, int);
87extern bool grab_super_passive(struct super_block *sb); 87extern bool trylock_super(struct super_block *sb);
88extern struct dentry *mount_fs(struct file_system_type *, 88extern struct dentry *mount_fs(struct file_system_type *,
89 int, const char *, void *); 89 int, const char *, void *);
90extern struct super_block *user_get_super(dev_t); 90extern struct super_block *user_get_super(dev_t);
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index bcbef08a4d8f..b5128c6e63ad 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -524,6 +524,9 @@ static int do_one_pass(journal_t *journal,
524 if (descr_csum_size > 0 && 524 if (descr_csum_size > 0 &&
525 !jbd2_descr_block_csum_verify(journal, 525 !jbd2_descr_block_csum_verify(journal,
526 bh->b_data)) { 526 bh->b_data)) {
527 printk(KERN_ERR "JBD2: Invalid checksum "
528 "recovering block %lu in log\n",
529 next_log_block);
527 err = -EIO; 530 err = -EIO;
528 brelse(bh); 531 brelse(bh);
529 goto failed; 532 goto failed;
diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
index 92e0644bf867..556de100ebd5 100644
--- a/fs/jffs2/compr_rubin.c
+++ b/fs/jffs2/compr_rubin.c
@@ -84,11 +84,6 @@ static inline int pullbit(struct pushpull *pp)
84 return bit; 84 return bit;
85} 85}
86 86
87static inline int pulledbits(struct pushpull *pp)
88{
89 return pp->ofs;
90}
91
92 87
93static void init_rubin(struct rubin_state *rs, int div, int *bits) 88static void init_rubin(struct rubin_state *rs, int div, int *bits)
94{ 89{
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 938556025d64..f21b6fb5e4c4 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -252,7 +252,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
252 if (!f->inocache) 252 if (!f->inocache)
253 return -EIO; 253 return -EIO;
254 254
255 if (S_ISDIR(old_dentry->d_inode->i_mode)) 255 if (d_is_dir(old_dentry))
256 return -EPERM; 256 return -EPERM;
257 257
258 /* XXX: This is ugly */ 258 /* XXX: This is ugly */
@@ -772,7 +772,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
772 */ 772 */
773 if (new_dentry->d_inode) { 773 if (new_dentry->d_inode) {
774 victim_f = JFFS2_INODE_INFO(new_dentry->d_inode); 774 victim_f = JFFS2_INODE_INFO(new_dentry->d_inode);
775 if (S_ISDIR(new_dentry->d_inode->i_mode)) { 775 if (d_is_dir(new_dentry)) {
776 struct jffs2_full_dirent *fd; 776 struct jffs2_full_dirent *fd;
777 777
778 mutex_lock(&victim_f->sem); 778 mutex_lock(&victim_f->sem);
@@ -807,7 +807,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
807 807
808 if (victim_f) { 808 if (victim_f) {
809 /* There was a victim. Kill it off nicely */ 809 /* There was a victim. Kill it off nicely */
810 if (S_ISDIR(new_dentry->d_inode->i_mode)) 810 if (d_is_dir(new_dentry))
811 clear_nlink(new_dentry->d_inode); 811 clear_nlink(new_dentry->d_inode);
812 else 812 else
813 drop_nlink(new_dentry->d_inode); 813 drop_nlink(new_dentry->d_inode);
@@ -815,7 +815,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
815 inode which didn't exist. */ 815 inode which didn't exist. */
816 if (victim_f->inocache) { 816 if (victim_f->inocache) {
817 mutex_lock(&victim_f->sem); 817 mutex_lock(&victim_f->sem);
818 if (S_ISDIR(new_dentry->d_inode->i_mode)) 818 if (d_is_dir(new_dentry))
819 victim_f->inocache->pino_nlink = 0; 819 victim_f->inocache->pino_nlink = 0;
820 else 820 else
821 victim_f->inocache->pino_nlink--; 821 victim_f->inocache->pino_nlink--;
@@ -825,7 +825,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
825 825
826 /* If it was a directory we moved, and there was no victim, 826 /* If it was a directory we moved, and there was no victim,
827 increase i_nlink on its new parent */ 827 increase i_nlink on its new parent */
828 if (S_ISDIR(old_dentry->d_inode->i_mode) && !victim_f) 828 if (d_is_dir(old_dentry) && !victim_f)
829 inc_nlink(new_dir_i); 829 inc_nlink(new_dir_i);
830 830
831 /* Unlink the original */ 831 /* Unlink the original */
@@ -839,7 +839,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
839 struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode); 839 struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode);
840 mutex_lock(&f->sem); 840 mutex_lock(&f->sem);
841 inc_nlink(old_dentry->d_inode); 841 inc_nlink(old_dentry->d_inode);
842 if (f->inocache && !S_ISDIR(old_dentry->d_inode->i_mode)) 842 if (f->inocache && !d_is_dir(old_dentry))
843 f->inocache->pino_nlink++; 843 f->inocache->pino_nlink++;
844 mutex_unlock(&f->sem); 844 mutex_unlock(&f->sem);
845 845
@@ -852,7 +852,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
852 return ret; 852 return ret;
853 } 853 }
854 854
855 if (S_ISDIR(old_dentry->d_inode->i_mode)) 855 if (d_is_dir(old_dentry))
856 drop_nlink(old_dir_i); 856 drop_nlink(old_dir_i);
857 857
858 new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now); 858 new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now);
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 7654e87b0428..9ad5ba4b299b 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -510,6 +510,10 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
510 sumlen = c->sector_size - je32_to_cpu(sm->offset); 510 sumlen = c->sector_size - je32_to_cpu(sm->offset);
511 sumptr = buf + buf_size - sumlen; 511 sumptr = buf + buf_size - sumlen;
512 512
513 /* sm->offset may be wrong but MAGIC may be right */
514 if (sumlen > c->sector_size)
515 goto full_scan;
516
513 /* Now, make sure the summary itself is available */ 517 /* Now, make sure the summary itself is available */
514 if (sumlen > buf_size) { 518 if (sumlen > buf_size) {
515 /* Need to kmalloc for this. */ 519 /* Need to kmalloc for this. */
@@ -544,6 +548,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
544 } 548 }
545 } 549 }
546 550
551full_scan:
547 buf_ofs = jeb->offset; 552 buf_ofs = jeb->offset;
548 553
549 if (!buf_size) { 554 if (!buf_size) {
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 0918f0e2e266..3d76f28a2ba9 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -138,7 +138,7 @@ static struct dentry *jffs2_get_parent(struct dentry *child)
138 struct jffs2_inode_info *f; 138 struct jffs2_inode_info *f;
139 uint32_t pino; 139 uint32_t pino;
140 140
141 BUG_ON(!S_ISDIR(child->d_inode->i_mode)); 141 BUG_ON(!d_is_dir(child));
142 142
143 f = JFFS2_INODE_INFO(child->d_inode); 143 f = JFFS2_INODE_INFO(child->d_inode);
144 144
diff --git a/fs/libfs.c b/fs/libfs.c
index b2ffdb045be4..0ab65122ee45 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -329,7 +329,7 @@ int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
329 struct inode *new_dir, struct dentry *new_dentry) 329 struct inode *new_dir, struct dentry *new_dentry)
330{ 330{
331 struct inode *inode = old_dentry->d_inode; 331 struct inode *inode = old_dentry->d_inode;
332 int they_are_dirs = S_ISDIR(old_dentry->d_inode->i_mode); 332 int they_are_dirs = d_is_dir(old_dentry);
333 333
334 if (!simple_empty(new_dentry)) 334 if (!simple_empty(new_dentry))
335 return -ENOTEMPTY; 335 return -ENOTEMPTY;
diff --git a/fs/locks.c b/fs/locks.c
index 4753218f308e..365c82e1b3a9 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -681,21 +681,18 @@ static void locks_wake_up_blocks(struct file_lock *blocker)
681} 681}
682 682
683static void 683static void
684locks_insert_lock_ctx(struct file_lock *fl, int *counter, 684locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
685 struct list_head *before)
686{ 685{
687 fl->fl_nspid = get_pid(task_tgid(current)); 686 fl->fl_nspid = get_pid(task_tgid(current));
688 list_add_tail(&fl->fl_list, before); 687 list_add_tail(&fl->fl_list, before);
689 ++*counter;
690 locks_insert_global_locks(fl); 688 locks_insert_global_locks(fl);
691} 689}
692 690
693static void 691static void
694locks_unlink_lock_ctx(struct file_lock *fl, int *counter) 692locks_unlink_lock_ctx(struct file_lock *fl)
695{ 693{
696 locks_delete_global_locks(fl); 694 locks_delete_global_locks(fl);
697 list_del_init(&fl->fl_list); 695 list_del_init(&fl->fl_list);
698 --*counter;
699 if (fl->fl_nspid) { 696 if (fl->fl_nspid) {
700 put_pid(fl->fl_nspid); 697 put_pid(fl->fl_nspid);
701 fl->fl_nspid = NULL; 698 fl->fl_nspid = NULL;
@@ -704,10 +701,9 @@ locks_unlink_lock_ctx(struct file_lock *fl, int *counter)
704} 701}
705 702
706static void 703static void
707locks_delete_lock_ctx(struct file_lock *fl, int *counter, 704locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
708 struct list_head *dispose)
709{ 705{
710 locks_unlink_lock_ctx(fl, counter); 706 locks_unlink_lock_ctx(fl);
711 if (dispose) 707 if (dispose)
712 list_add(&fl->fl_list, dispose); 708 list_add(&fl->fl_list, dispose);
713 else 709 else
@@ -895,7 +891,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
895 if (request->fl_type == fl->fl_type) 891 if (request->fl_type == fl->fl_type)
896 goto out; 892 goto out;
897 found = true; 893 found = true;
898 locks_delete_lock_ctx(fl, &ctx->flc_flock_cnt, &dispose); 894 locks_delete_lock_ctx(fl, &dispose);
899 break; 895 break;
900 } 896 }
901 897
@@ -905,16 +901,6 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
905 goto out; 901 goto out;
906 } 902 }
907 903
908 /*
909 * If a higher-priority process was blocked on the old file lock,
910 * give it the opportunity to lock the file.
911 */
912 if (found) {
913 spin_unlock(&ctx->flc_lock);
914 cond_resched();
915 spin_lock(&ctx->flc_lock);
916 }
917
918find_conflict: 904find_conflict:
919 list_for_each_entry(fl, &ctx->flc_flock, fl_list) { 905 list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
920 if (!flock_locks_conflict(request, fl)) 906 if (!flock_locks_conflict(request, fl))
@@ -929,7 +915,7 @@ find_conflict:
929 if (request->fl_flags & FL_ACCESS) 915 if (request->fl_flags & FL_ACCESS)
930 goto out; 916 goto out;
931 locks_copy_lock(new_fl, request); 917 locks_copy_lock(new_fl, request);
-		locks_insert_lock_ctx(new_fl, &ctx->flc_flock_cnt, &ctx->flc_flock);
+		locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
 		new_fl = NULL;
 		error = 0;
 
@@ -1046,8 +1032,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
 			else
 				request->fl_end = fl->fl_end;
 			if (added) {
-				locks_delete_lock_ctx(fl, &ctx->flc_posix_cnt,
-							&dispose);
+				locks_delete_lock_ctx(fl, &dispose);
 				continue;
 			}
 			request = fl;
@@ -1076,8 +1061,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
 			 * one (This may happen several times).
 			 */
 			if (added) {
-				locks_delete_lock_ctx(fl,
-						&ctx->flc_posix_cnt, &dispose);
+				locks_delete_lock_ctx(fl, &dispose);
 				continue;
 			}
 			/*
@@ -1093,10 +1077,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
 				locks_copy_lock(new_fl, request);
 				request = new_fl;
 				new_fl = NULL;
-				locks_insert_lock_ctx(request,
-						&ctx->flc_posix_cnt, &fl->fl_list);
-				locks_delete_lock_ctx(fl,
-						&ctx->flc_posix_cnt, &dispose);
+				locks_insert_lock_ctx(request, &fl->fl_list);
+				locks_delete_lock_ctx(fl, &dispose);
 				added = true;
 			}
 		}
@@ -1124,8 +1106,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
 			goto out;
 		}
 		locks_copy_lock(new_fl, request);
-		locks_insert_lock_ctx(new_fl, &ctx->flc_posix_cnt,
-					&fl->fl_list);
+		locks_insert_lock_ctx(new_fl, &fl->fl_list);
+		fl = new_fl;
 		new_fl = NULL;
 	}
 	if (right) {
@@ -1136,8 +1118,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
 			left = new_fl2;
 			new_fl2 = NULL;
 			locks_copy_lock(left, right);
-			locks_insert_lock_ctx(left, &ctx->flc_posix_cnt,
-						&fl->fl_list);
+			locks_insert_lock_ctx(left, &fl->fl_list);
 		}
 		right->fl_start = request->fl_end + 1;
 		locks_wake_up_blocks(right);
@@ -1321,7 +1302,6 @@ static void lease_clear_pending(struct file_lock *fl, int arg)
 /* We already had a lease on this file; just change its type */
 int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
 {
-	struct file_lock_context *flctx;
 	int error = assign_type(fl, arg);
 
 	if (error)
@@ -1331,7 +1311,6 @@ int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
 	if (arg == F_UNLCK) {
 		struct file *filp = fl->fl_file;
 
-		flctx = file_inode(filp)->i_flctx;
 		f_delown(filp);
 		filp->f_owner.signum = 0;
 		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
@@ -1339,7 +1318,7 @@ int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
 			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
 			fl->fl_fasync = NULL;
 		}
-		locks_delete_lock_ctx(fl, &flctx->flc_lease_cnt, dispose);
+		locks_delete_lock_ctx(fl, dispose);
 	}
 	return 0;
 }
@@ -1456,8 +1435,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
 			fl->fl_downgrade_time = break_time;
 		}
 		if (fl->fl_lmops->lm_break(fl))
-			locks_delete_lock_ctx(fl, &ctx->flc_lease_cnt,
-						&dispose);
+			locks_delete_lock_ctx(fl, &dispose);
 	}
 
 	if (list_empty(&ctx->flc_lease))
@@ -1697,7 +1675,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
 	if (!leases_enable)
 		goto out;
 
-	locks_insert_lock_ctx(lease, &ctx->flc_lease_cnt, &ctx->flc_lease);
+	locks_insert_lock_ctx(lease, &ctx->flc_lease);
 	/*
 	 * The check in break_lease() is lockless. It's possible for another
 	 * open to race in after we did the earlier check for a conflicting
@@ -1710,7 +1688,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
 	smp_mb();
 	error = check_conflicting_open(dentry, arg, lease->fl_flags);
 	if (error) {
-		locks_unlink_lock_ctx(lease, &ctx->flc_lease_cnt);
+		locks_unlink_lock_ctx(lease);
 		goto out;
 	}
 
@@ -2448,7 +2426,8 @@ locks_remove_lease(struct file *filp)
 
 	spin_lock(&ctx->flc_lock);
 	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
-		lease_modify(fl, F_UNLCK, &dispose);
+		if (filp == fl->fl_file)
+			lease_modify(fl, F_UNLCK, &dispose);
 	spin_unlock(&ctx->flc_lock);
 	locks_dispose_list(&dispose);
 }
diff --git a/fs/namei.c b/fs/namei.c
index 96ca11dea4a2..c83145af4bfc 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2814,7 +2814,7 @@ no_open:
 	} else if (!dentry->d_inode) {
 		goto out;
 	} else if ((open_flag & O_TRUNC) &&
-		   S_ISREG(dentry->d_inode->i_mode)) {
+		   d_is_reg(dentry)) {
 		goto out;
 	}
 	/* will fail later, go on to get the right error */
diff --git a/fs/namespace.c b/fs/namespace.c
index 72a286e0d33e..82ef1405260e 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1907,8 +1907,8 @@ static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
 	if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER)
 		return -EINVAL;
 
-	if (S_ISDIR(mp->m_dentry->d_inode->i_mode) !=
-	      S_ISDIR(mnt->mnt.mnt_root->d_inode->i_mode))
+	if (d_is_dir(mp->m_dentry) !=
+	      d_is_dir(mnt->mnt.mnt_root))
 		return -ENOTDIR;
 
 	return attach_recursive_mnt(mnt, p, mp, NULL);
@@ -2180,8 +2180,8 @@ static int do_move_mount(struct path *path, const char *old_name)
 	if (!mnt_has_parent(old))
 		goto out1;
 
-	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
-	      S_ISDIR(old_path.dentry->d_inode->i_mode))
+	if (d_is_dir(path->dentry) !=
+	      d_is_dir(old_path.dentry))
 		goto out1;
 	/*
 	 * Don't move a mount residing in a shared parent.
@@ -2271,7 +2271,7 @@ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
 		goto unlock;
 
 	err = -EINVAL;
-	if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode))
+	if (d_is_symlink(newmnt->mnt.mnt_root))
 		goto unlock;
 
 	newmnt->mnt.mnt_flags = mnt_flags;
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index e36a9d78ea49..197806fb87ff 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -427,6 +427,8 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
 	if (clp == NULL)
 		goto out;
 
+	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
+		goto out;
 	tbl = &clp->cl_session->bc_slot_table;
 
 	spin_lock(&tbl->slot_tbl_lock);
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index f4ccfe6521ec..19ca95cdfd9b 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -313,7 +313,7 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp,
 		goto out;
 	}
 
-	args->devs = kmalloc(n * sizeof(*args->devs), GFP_KERNEL);
+	args->devs = kmalloc_array(n, sizeof(*args->devs), GFP_KERNEL);
 	if (!args->devs) {
 		status = htonl(NFS4ERR_DELAY);
 		goto out;
@@ -415,7 +415,7 @@ static __be32 decode_rc_list(struct xdr_stream *xdr,
 			rc_list->rcl_nrefcalls * 2 * sizeof(uint32_t));
 	if (unlikely(p == NULL))
 		goto out;
-	rc_list->rcl_refcalls = kmalloc(rc_list->rcl_nrefcalls *
+	rc_list->rcl_refcalls = kmalloc_array(rc_list->rcl_nrefcalls,
 					sizeof(*rc_list->rcl_refcalls),
 					GFP_KERNEL);
 	if (unlikely(rc_list->rcl_refcalls == NULL))
@@ -464,8 +464,10 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
 
 		for (i = 0; i < args->csa_nrclists; i++) {
 			status = decode_rc_list(xdr, &args->csa_rclists[i]);
-			if (status)
+			if (status) {
+				args->csa_nrclists = i;
 				goto out_free;
+			}
 		}
 	}
 	status = 0;
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index da5433230bb1..a1f0685b42ff 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -180,7 +180,6 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
 			delegation->cred = get_rpccred(cred);
 			clear_bit(NFS_DELEGATION_NEED_RECLAIM,
 				  &delegation->flags);
-			NFS_I(inode)->delegation_state = delegation->type;
 			spin_unlock(&delegation->lock);
 			put_rpccred(oldcred);
 			rcu_read_unlock();
@@ -275,7 +274,6 @@ nfs_detach_delegation_locked(struct nfs_inode *nfsi,
 	set_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
 	list_del_rcu(&delegation->super_list);
 	delegation->inode = NULL;
-	nfsi->delegation_state = 0;
 	rcu_assign_pointer(nfsi->delegation, NULL);
 	spin_unlock(&delegation->lock);
 	return delegation;
@@ -355,7 +353,6 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
 					   &delegation->stateid)) {
 			nfs_update_inplace_delegation(old_delegation,
 						      delegation);
-			nfsi->delegation_state = old_delegation->type;
 			goto out;
 		}
 		/*
@@ -379,7 +376,6 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
 		goto out;
 	}
 	list_add_rcu(&delegation->super_list, &server->delegations);
-	nfsi->delegation_state = delegation->type;
 	rcu_assign_pointer(nfsi->delegation, delegation);
 	delegation = NULL;
 
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 7077521acdf4..e907c8cf732e 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -283,7 +283,7 @@ static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
 void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
 			      struct nfs_direct_req *dreq)
 {
-	cinfo->lock = &dreq->lock;
+	cinfo->lock = &dreq->inode->i_lock;
 	cinfo->mds = &dreq->mds_cinfo;
 	cinfo->ds = &dreq->ds_cinfo;
 	cinfo->dreq = dreq;
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 7ae1c263c5cf..91e88a7ecef0 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -960,52 +960,19 @@ filelayout_mark_request_commit(struct nfs_page *req,
 {
 	struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
 	u32 i, j;
-	struct list_head *list;
-	struct pnfs_commit_bucket *buckets;
 
 	if (fl->commit_through_mds) {
-		list = &cinfo->mds->list;
-		spin_lock(cinfo->lock);
-		goto mds_commit;
-	}
-
-	/* Note that we are calling nfs4_fl_calc_j_index on each page
-	 * that ends up being committed to a data server. An attractive
-	 * alternative is to add a field to nfs_write_data and nfs_page
-	 * to store the value calculated in filelayout_write_pagelist
-	 * and just use that here.
-	 */
-	j = nfs4_fl_calc_j_index(lseg, req_offset(req));
-	i = select_bucket_index(fl, j);
-	spin_lock(cinfo->lock);
-	buckets = cinfo->ds->buckets;
-	list = &buckets[i].written;
-	if (list_empty(list)) {
-		/* Non-empty buckets hold a reference on the lseg. That ref
-		 * is normally transferred to the COMMIT call and released
-		 * there. It could also be released if the last req is pulled
-		 * off due to a rewrite, in which case it will be done in
-		 * pnfs_generic_clear_request_commit
-		 */
-		buckets[i].wlseg = pnfs_get_lseg(lseg);
-	}
-	set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
-	cinfo->ds->nwritten++;
-
-mds_commit:
-	/* nfs_request_add_commit_list(). We need to add req to list without
-	 * dropping cinfo lock.
-	 */
-	set_bit(PG_CLEAN, &(req)->wb_flags);
-	nfs_list_add_request(req, list);
-	cinfo->mds->ncommit++;
-	spin_unlock(cinfo->lock);
-	if (!cinfo->dreq) {
-		inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-		inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
-			     BDI_RECLAIMABLE);
-		__mark_inode_dirty(req->wb_context->dentry->d_inode,
-				   I_DIRTY_DATASYNC);
+		nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo);
+	} else {
+		/* Note that we are calling nfs4_fl_calc_j_index on each page
+		 * that ends up being committed to a data server. An attractive
+		 * alternative is to add a field to nfs_write_data and nfs_page
+		 * to store the value calculated in filelayout_write_pagelist
+		 * and just use that here.
+		 */
+		j = nfs4_fl_calc_j_index(lseg, req_offset(req));
+		i = select_bucket_index(fl, j);
+		pnfs_layout_mark_request_commit(req, lseg, cinfo, i);
 	}
 }
 
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index c22ecaa86c1c..315cc68945b9 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1332,47 +1332,6 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
 	return PNFS_ATTEMPTED;
 }
 
-static void
-ff_layout_mark_request_commit(struct nfs_page *req,
-			      struct pnfs_layout_segment *lseg,
-			      struct nfs_commit_info *cinfo,
-			      u32 ds_commit_idx)
-{
-	struct list_head *list;
-	struct pnfs_commit_bucket *buckets;
-
-	spin_lock(cinfo->lock);
-	buckets = cinfo->ds->buckets;
-	list = &buckets[ds_commit_idx].written;
-	if (list_empty(list)) {
-		/* Non-empty buckets hold a reference on the lseg. That ref
-		 * is normally transferred to the COMMIT call and released
-		 * there. It could also be released if the last req is pulled
-		 * off due to a rewrite, in which case it will be done in
-		 * pnfs_common_clear_request_commit
-		 */
-		WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
-		buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
-	}
-	set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
-	cinfo->ds->nwritten++;
-
-	/* nfs_request_add_commit_list(). We need to add req to list without
-	 * dropping cinfo lock.
-	 */
-	set_bit(PG_CLEAN, &(req)->wb_flags);
-	nfs_list_add_request(req, list);
-	cinfo->mds->ncommit++;
-	spin_unlock(cinfo->lock);
-	if (!cinfo->dreq) {
-		inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-		inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
-			     BDI_RECLAIMABLE);
-		__mark_inode_dirty(req->wb_context->dentry->d_inode,
-				   I_DIRTY_DATASYNC);
-	}
-}
-
 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
 {
 	return i;
@@ -1540,7 +1499,7 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = {
 	.pg_write_ops		= &ff_layout_pg_write_ops,
 	.get_ds_info		= ff_layout_get_ds_info,
 	.free_deviceid_node	= ff_layout_free_deveiceid_node,
-	.mark_request_commit	= ff_layout_mark_request_commit,
+	.mark_request_commit	= pnfs_layout_mark_request_commit,
 	.clear_request_commit	= pnfs_generic_clear_request_commit,
 	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
 	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index e4f0dcef8f54..83107be3dd01 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1775,7 +1775,6 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
 #if IS_ENABLED(CONFIG_NFS_V4)
 	INIT_LIST_HEAD(&nfsi->open_states);
 	nfsi->delegation = NULL;
-	nfsi->delegation_state = 0;
 	init_rwsem(&nfsi->rwsem);
 	nfsi->layout = NULL;
 #endif
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 212b8c883d22..b802fb3a2d99 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -598,6 +598,19 @@ void nfs_super_set_maxbytes(struct super_block *sb, __u64 maxfilesize)
 }
 
 /*
+ * Record the page as unstable and mark its inode as dirty.
+ */
+static inline
+void nfs_mark_page_unstable(struct page *page)
+{
+	struct inode *inode = page_file_mapping(page)->host;
+
+	inc_zone_page_state(page, NR_UNSTABLE_NFS);
+	inc_bdi_stat(inode_to_bdi(inode), BDI_RECLAIMABLE);
+	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+}
+
+/*
  * Determine the number of bytes of data the page contains
  */
 static inline
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 2e7c9f7a6f7c..88180ac5ea0e 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -6648,47 +6648,47 @@ nfs41_same_server_scope(struct nfs41_server_scope *a,
 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
 {
 	int status;
+	struct nfs41_bind_conn_to_session_args args = {
+		.client = clp,
+		.dir = NFS4_CDFC4_FORE_OR_BOTH,
+	};
 	struct nfs41_bind_conn_to_session_res res;
 	struct rpc_message msg = {
 		.rpc_proc =
 			&nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
-		.rpc_argp = clp,
+		.rpc_argp = &args,
 		.rpc_resp = &res,
 		.rpc_cred = cred,
 	};
 
 	dprintk("--> %s\n", __func__);
 
-	res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
-	if (unlikely(res.session == NULL)) {
-		status = -ENOMEM;
-		goto out;
-	}
+	nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
+	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
+		args.dir = NFS4_CDFC4_FORE;
 
 	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
 	trace_nfs4_bind_conn_to_session(clp, status);
 	if (status == 0) {
-		if (memcmp(res.session->sess_id.data,
+		if (memcmp(res.sessionid.data,
 			clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
 			dprintk("NFS: %s: Session ID mismatch\n", __func__);
 			status = -EIO;
-			goto out_session;
+			goto out;
 		}
-		if (res.dir != NFS4_CDFS4_BOTH) {
+		if ((res.dir & args.dir) != res.dir || res.dir == 0) {
 			dprintk("NFS: %s: Unexpected direction from server\n",
 				__func__);
 			status = -EIO;
-			goto out_session;
+			goto out;
 		}
-		if (res.use_conn_in_rdma_mode) {
+		if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
 			dprintk("NFS: %s: Server returned RDMA mode = true\n",
 				__func__);
 			status = -EIO;
-			goto out_session;
+			goto out;
 		}
 	}
-out_session:
-	kfree(res.session);
 out:
 	dprintk("<-- %s status= %d\n", __func__, status);
 	return status;
@@ -7166,10 +7166,11 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
 			args->bc_attrs.max_reqs);
 }
 
-static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
+static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
+		struct nfs41_create_session_res *res)
 {
 	struct nfs4_channel_attrs *sent = &args->fc_attrs;
-	struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
+	struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
 
 	if (rcvd->max_resp_sz > sent->max_resp_sz)
 		return -EINVAL;
@@ -7188,11 +7189,14 @@ static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args
 	return 0;
 }
 
-static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
+static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
+		struct nfs41_create_session_res *res)
 {
 	struct nfs4_channel_attrs *sent = &args->bc_attrs;
-	struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
+	struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
 
+	if (!(res->flags & SESSION4_BACK_CHAN))
+		goto out;
 	if (rcvd->max_rqst_sz > sent->max_rqst_sz)
 		return -EINVAL;
 	if (rcvd->max_resp_sz < sent->max_resp_sz)
@@ -7204,18 +7208,30 @@ static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args
 		return -EINVAL;
 	if (rcvd->max_reqs != sent->max_reqs)
 		return -EINVAL;
+out:
 	return 0;
 }
 
 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
-				     struct nfs4_session *session)
+				     struct nfs41_create_session_res *res)
 {
 	int ret;
 
-	ret = nfs4_verify_fore_channel_attrs(args, session);
+	ret = nfs4_verify_fore_channel_attrs(args, res);
 	if (ret)
 		return ret;
-	return nfs4_verify_back_channel_attrs(args, session);
+	return nfs4_verify_back_channel_attrs(args, res);
+}
+
+static void nfs4_update_session(struct nfs4_session *session,
+		struct nfs41_create_session_res *res)
+{
+	nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
+	session->flags = res->flags;
+	memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
+	if (res->flags & SESSION4_BACK_CHAN)
+		memcpy(&session->bc_attrs, &res->bc_attrs,
+			sizeof(session->bc_attrs));
 }
 
 static int _nfs4_proc_create_session(struct nfs_client *clp,
@@ -7224,11 +7240,12 @@ static int _nfs4_proc_create_session(struct nfs_client *clp,
 	struct nfs4_session *session = clp->cl_session;
 	struct nfs41_create_session_args args = {
 		.client = clp,
+		.clientid = clp->cl_clientid,
+		.seqid = clp->cl_seqid,
 		.cb_program = NFS4_CALLBACK,
 	};
-	struct nfs41_create_session_res res = {
-		.client = clp,
-	};
+	struct nfs41_create_session_res res;
+
 	struct rpc_message msg = {
 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
 		.rpc_argp = &args,
@@ -7245,11 +7262,15 @@ static int _nfs4_proc_create_session(struct nfs_client *clp,
 
 	if (!status) {
 		/* Verify the session's negotiated channel_attrs values */
-		status = nfs4_verify_channel_attrs(&args, session);
+		status = nfs4_verify_channel_attrs(&args, &res);
 		/* Increment the clientid slot sequence id */
-		clp->cl_seqid++;
+		if (clp->cl_seqid == res.seqid)
+			clp->cl_seqid++;
+		if (status)
+			goto out;
+		nfs4_update_session(session, &res);
 	}
-
+out:
 	return status;
 }
 
diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
index e799dc3c3b1d..e23366effcfb 100644
--- a/fs/nfs/nfs4session.c
+++ b/fs/nfs/nfs4session.c
@@ -450,7 +450,7 @@ int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
 	tbl = &ses->fc_slot_table;
 	tbl->session = ses;
 	status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
-	if (status) /* -ENOMEM */
+	if (status || !(ses->flags & SESSION4_BACK_CHAN)) /* -ENOMEM */
 		return status;
 	/* Back channel */
 	tbl = &ses->bc_slot_table;
diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
index b34ada9bc6a2..fc46c7455898 100644
--- a/fs/nfs/nfs4session.h
+++ b/fs/nfs/nfs4session.h
@@ -118,6 +118,12 @@ static inline int nfs4_has_persistent_session(const struct nfs_client *clp)
 	return 0;
 }
 
+static inline void nfs4_copy_sessionid(struct nfs4_sessionid *dst,
+		const struct nfs4_sessionid *src)
+{
+	memcpy(dst->data, src->data, NFS4_MAX_SESSIONID_LEN);
+}
+
 #ifdef CONFIG_CRC32
 /*
  * nfs_session_id_hash - calculate the crc32 hash for the session id
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index e23a0a664e12..5c399ec41079 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1715,17 +1715,17 @@ static void encode_secinfo(struct xdr_stream *xdr, const struct qstr *name, stru
 #if defined(CONFIG_NFS_V4_1)
 /* NFSv4.1 operations */
 static void encode_bind_conn_to_session(struct xdr_stream *xdr,
-				struct nfs4_session *session,
+				struct nfs41_bind_conn_to_session_args *args,
 				struct compound_hdr *hdr)
 {
 	__be32 *p;
 
 	encode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION,
 		decode_bind_conn_to_session_maxsz, hdr);
-	encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+	encode_opaque_fixed(xdr, args->sessionid.data, NFS4_MAX_SESSIONID_LEN);
 	p = xdr_reserve_space(xdr, 8);
-	*p++ = cpu_to_be32(NFS4_CDFC4_BACK_OR_BOTH);
-	*p = 0;	/* use_conn_in_rdma_mode = False */
+	*p++ = cpu_to_be32(args->dir);
+	*p = (args->use_conn_in_rdma_mode) ? cpu_to_be32(1) : cpu_to_be32(0);
 }
 
 static void encode_op_map(struct xdr_stream *xdr, struct nfs4_op_map *op_map)
@@ -1806,8 +1806,8 @@ static void encode_create_session(struct xdr_stream *xdr,
 
 	encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr);
 	p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12);
-	p = xdr_encode_hyper(p, clp->cl_clientid);
-	*p++ = cpu_to_be32(clp->cl_seqid);	/*Sequence id */
+	p = xdr_encode_hyper(p, args->clientid);
+	*p++ = cpu_to_be32(args->seqid);	/*Sequence id */
 	*p++ = cpu_to_be32(args->flags);	/*flags */
 
 	/* Fore Channel */
@@ -2734,14 +2734,14 @@ static void nfs4_xdr_enc_fsid_present(struct rpc_rqst *req,
  */
 static void nfs4_xdr_enc_bind_conn_to_session(struct rpc_rqst *req,
 				struct xdr_stream *xdr,
-				struct nfs_client *clp)
+				struct nfs41_bind_conn_to_session_args *args)
 {
 	struct compound_hdr hdr = {
-		.minorversion = clp->cl_mvops->minor_version,
+		.minorversion = args->client->cl_mvops->minor_version,
 	};
 
 	encode_compound_hdr(xdr, req, &hdr);
-	encode_bind_conn_to_session(xdr, clp->cl_session, &hdr);
+	encode_bind_conn_to_session(xdr, args, &hdr);
 	encode_nops(&hdr);
 }
 
@@ -5613,7 +5613,7 @@ static int decode_bind_conn_to_session(struct xdr_stream *xdr,
 
 	status = decode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION);
 	if (!status)
-		status = decode_sessionid(xdr, &res->session->sess_id);
+		status = decode_sessionid(xdr, &res->sessionid);
 	if (unlikely(status))
 		return status;
 
@@ -5641,12 +5641,10 @@ static int decode_create_session(struct xdr_stream *xdr,
 {
 	__be32 *p;
 	int status;
-	struct nfs_client *clp = res->client;
-	struct nfs4_session *session = clp->cl_session;
 
 	status = decode_op_hdr(xdr, OP_CREATE_SESSION);
 	if (!status)
-		status = decode_sessionid(xdr, &session->sess_id);
+		status = decode_sessionid(xdr, &res->sessionid);
 	if (unlikely(status))
 		return status;
 
@@ -5654,13 +5652,13 @@ static int decode_create_session(struct xdr_stream *xdr,
 	p = xdr_inline_decode(xdr, 8);
 	if (unlikely(!p))
 		goto out_overflow;
-	clp->cl_seqid = be32_to_cpup(p++);
-	session->flags = be32_to_cpup(p);
+	res->seqid = be32_to_cpup(p++);
+	res->flags = be32_to_cpup(p);
 
 	/* Channel attributes */
-	status = decode_chan_attrs(xdr, &session->fc_attrs);
+	status = decode_chan_attrs(xdr, &res->fc_attrs);
 	if (!status)
-		status = decode_chan_attrs(xdr, &session->bc_attrs);
+		status = decode_chan_attrs(xdr, &res->bc_attrs);
 	return status;
 out_overflow:
 	print_overflow_msg(__func__, xdr);
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 797cd6253adf..635f0865671c 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -344,6 +344,10 @@ void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
 struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net,
 						 struct xdr_stream *xdr,
 						 gfp_t gfp_flags);
+void pnfs_layout_mark_request_commit(struct nfs_page *req,
+				     struct pnfs_layout_segment *lseg,
+				     struct nfs_commit_info *cinfo,
+				     u32 ds_commit_idx);
 
 static inline bool nfs_have_layout(struct inode *inode)
 {
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index fdc4f6562bb7..54e36b38fb5f 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -838,3 +838,33 @@ out_err:
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(nfs4_decode_mp_ds_addr);
+
+void
+pnfs_layout_mark_request_commit(struct nfs_page *req,
+				struct pnfs_layout_segment *lseg,
+				struct nfs_commit_info *cinfo,
+				u32 ds_commit_idx)
+{
+	struct list_head *list;
+	struct pnfs_commit_bucket *buckets;
+
+	spin_lock(cinfo->lock);
+	buckets = cinfo->ds->buckets;
+	list = &buckets[ds_commit_idx].written;
+	if (list_empty(list)) {
+		/* Non-empty buckets hold a reference on the lseg. That ref
+		 * is normally transferred to the COMMIT call and released
+		 * there. It could also be released if the last req is pulled
+		 * off due to a rewrite, in which case it will be done in
+		 * pnfs_common_clear_request_commit
+		 */
+		WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
+		buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
+	}
+	set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
+	cinfo->ds->nwritten++;
+	spin_unlock(cinfo->lock);
+
+	nfs_request_add_commit_list(req, list, cinfo);
+}
+EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 88a6d2196ece..595d81e354d1 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -789,13 +789,8 @@ nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
 	nfs_list_add_request(req, dst);
 	cinfo->mds->ncommit++;
 	spin_unlock(cinfo->lock);
-	if (!cinfo->dreq) {
-		inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-		inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
-			     BDI_RECLAIMABLE);
-		__mark_inode_dirty(req->wb_context->dentry->d_inode,
-				   I_DIRTY_DATASYNC);
-	}
+	if (!cinfo->dreq)
+		nfs_mark_page_unstable(req->wb_page);
 }
 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
 
@@ -1605,11 +1600,8 @@ void nfs_retry_commit(struct list_head *page_list,
 		req = nfs_list_entry(page_list->next);
 		nfs_list_remove_request(req);
 		nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
-		if (!cinfo->dreq) {
-			dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-			dec_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
-				     BDI_RECLAIMABLE);
-		}
+		if (!cinfo->dreq)
+			nfs_clear_page_commit(req->wb_page);
 		nfs_unlock_and_release_request(req);
 	}
 }
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index cc6a76072009..1c307f02baa8 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -583,7 +583,7 @@ nfs4_reset_recoverydir(char *recdir)
 	if (status)
 		return status;
 	status = -ENOTDIR;
-	if (S_ISDIR(path.dentry->d_inode->i_mode)) {
+	if (d_is_dir(path.dentry)) {
 		strcpy(user_recovery_dirname, recdir);
 		status = 0;
 	}
@@ -1426,7 +1426,7 @@ nfsd4_client_tracking_init(struct net *net)
 	nn->client_tracking_ops = &nfsd4_legacy_tracking_ops;
 	status = kern_path(nfs4_recoverydir(), LOOKUP_FOLLOW, &path);
 	if (!status) {
-		status = S_ISDIR(path.dentry->d_inode->i_mode);
+		status = d_is_dir(path.dentry);
 		path_put(&path);
 		if (status)
 			goto do_init;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index f6b2a09f793f..d2f2c37dc2db 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1638,7 +1638,7 @@ __destroy_client(struct nfs4_client *clp)
 		nfs4_put_stid(&dp->dl_stid);
 	}
 	while (!list_empty(&clp->cl_revoked)) {
-		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
+		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
 		list_del_init(&dp->dl_recall_lru);
 		nfs4_put_stid(&dp->dl_stid);
 	}
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 965b478d50fc..e9fa966fc37f 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -114,8 +114,8 @@ static inline __be32 check_pseudo_root(struct svc_rqst *rqstp,
 	 * We're exposing only the directories and symlinks that have to be
 	 * traversed on the way to real exports:
 	 */
-	if (unlikely(!S_ISDIR(dentry->d_inode->i_mode) &&
-		     !S_ISLNK(dentry->d_inode->i_mode)))
+	if (unlikely(!d_is_dir(dentry) &&
+		     !d_is_symlink(dentry)))
 		return nfserr_stale;
 	/*
 	 * A pseudoroot export gives permission to access only one
@@ -259,7 +259,7 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
 		goto out;
 	}
 
-	if (S_ISDIR(dentry->d_inode->i_mode) &&
+	if (d_is_dir(dentry) &&
 	    (dentry->d_flags & DCACHE_DISCONNECTED)) {
 		printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %pd2\n",
 			dentry);
@@ -414,7 +414,7 @@ static inline void _fh_update_old(struct dentry *dentry,
 {
 	fh->ofh_ino = ino_t_to_u32(dentry->d_inode->i_ino);
 	fh->ofh_generation = dentry->d_inode->i_generation;
-	if (S_ISDIR(dentry->d_inode->i_mode) ||
+	if (d_is_dir(dentry) ||
 	    (exp->ex_flags & NFSEXP_NOSUBTREECHECK))
 		fh->ofh_dirino = 0;
 }
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 5685c679dd93..368526582429 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -615,9 +615,9 @@ nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *suppor
 	export = fhp->fh_export;
 	dentry = fhp->fh_dentry;
 
-	if (S_ISREG(dentry->d_inode->i_mode))
+	if (d_is_reg(dentry))
 		map = nfs3_regaccess;
-	else if (S_ISDIR(dentry->d_inode->i_mode))
+	else if (d_is_dir(dentry))
 		map = nfs3_diraccess;
 	else
 		map = nfs3_anyaccess;
@@ -1402,7 +1402,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
 
 	switch (createmode) {
 	case NFS3_CREATE_UNCHECKED:
-		if (! S_ISREG(dchild->d_inode->i_mode))
+		if (! d_is_reg(dchild))
 			goto out;
 		else if (truncp) {
 			/* in nfsv4, we need to treat this case a little
@@ -1615,7 +1615,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
 	if (err)
 		goto out;
 	err = nfserr_isdir;
-	if (S_ISDIR(tfhp->fh_dentry->d_inode->i_mode))
+	if (d_is_dir(tfhp->fh_dentry))
 		goto out;
 	err = nfserr_perm;
 	if (!len)
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index b2e3ff347620..ecdbae19a766 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -31,6 +31,8 @@
 #include "alloc.h"
 #include "dat.h"
 
+static void __nilfs_btree_init(struct nilfs_bmap *bmap);
+
 static struct nilfs_btree_path *nilfs_btree_alloc_path(void)
 {
 	struct nilfs_btree_path *path;
@@ -368,6 +370,34 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
 	return ret;
 }
 
+/**
+ * nilfs_btree_root_broken - verify consistency of btree root node
+ * @node: btree root node to be examined
+ * @ino: inode number
+ *
+ * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
+ */
+static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
+				   unsigned long ino)
+{
+	int level, flags, nchildren;
+	int ret = 0;
+
+	level = nilfs_btree_node_get_level(node);
+	flags = nilfs_btree_node_get_flags(node);
+	nchildren = nilfs_btree_node_get_nchildren(node);
+
+	if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
+		     level > NILFS_BTREE_LEVEL_MAX ||
+		     nchildren < 0 ||
+		     nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
+		pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
+			ino, level, flags, nchildren);
+		ret = 1;
+	}
+	return ret;
+}
+
 int nilfs_btree_broken_node_block(struct buffer_head *bh)
 {
 	int ret;
@@ -1713,7 +1743,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree,
 
 	/* convert and insert */
 	dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;
-	nilfs_btree_init(btree);
+	__nilfs_btree_init(btree);
 	if (nreq != NULL) {
 		nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);
 		nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);
@@ -2294,12 +2324,23 @@ static const struct nilfs_bmap_operations nilfs_btree_ops_gc = {
 	.bop_gather_data	=	NULL,
 };
 
-int nilfs_btree_init(struct nilfs_bmap *bmap)
+static void __nilfs_btree_init(struct nilfs_bmap *bmap)
 {
 	bmap->b_ops = &nilfs_btree_ops;
 	bmap->b_nchildren_per_block =
 		NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap));
-	return 0;
+}
+
+int nilfs_btree_init(struct nilfs_bmap *bmap)
+{
+	int ret = 0;
+
+	__nilfs_btree_init(bmap);
+
+	if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap),
+				    bmap->b_inode->i_ino))
+		ret = -EIO;
+	return ret;
 }
 
 void nilfs_btree_init_gc(struct nilfs_bmap *bmap)
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 51ceb8107284..9a66ff79ff27 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -115,8 +115,8 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
 		return false;
 
 	/* sorry, fanotify only gives a damn about files and dirs */
-	if (!S_ISREG(path->dentry->d_inode->i_mode) &&
-	    !S_ISDIR(path->dentry->d_inode->i_mode))
+	if (!d_is_reg(path->dentry) &&
+	    !d_can_lookup(path->dentry))
 		return false;
 
 	if (inode_mark && vfsmnt_mark) {
@@ -139,7 +139,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
 		BUG();
 	}
 
-	if (S_ISDIR(path->dentry->d_inode->i_mode) &&
+	if (d_is_dir(path->dentry) &&
 	    !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
 		return false;
 
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index ea10a8719107..24f640441bd9 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -191,7 +191,6 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
 		ovl_set_timestamps(upperdentry, stat);
 
 	return err;
-
 }
 
 static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
@@ -385,7 +384,7 @@ int ovl_copy_up(struct dentry *dentry)
 		struct kstat stat;
 		enum ovl_path_type type = ovl_path_type(dentry);
 
-		if (type != OVL_PATH_LOWER)
+		if (OVL_TYPE_UPPER(type))
 			break;
 
 		next = dget(dentry);
@@ -394,7 +393,7 @@ int ovl_copy_up(struct dentry *dentry)
 		parent = dget_parent(next);
 
 		type = ovl_path_type(parent);
-		if (type != OVL_PATH_LOWER)
+		if (OVL_TYPE_UPPER(type))
 			break;
 
 		dput(next);
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 8ffc4b980f1b..d139405d2bfa 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -19,7 +19,7 @@ void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
 	int err;
 
 	dget(wdentry);
-	if (S_ISDIR(wdentry->d_inode->i_mode))
+	if (d_is_dir(wdentry))
 		err = ovl_do_rmdir(wdir, wdentry);
 	else
 		err = ovl_do_unlink(wdir, wdentry);
@@ -118,14 +118,14 @@ int ovl_create_real(struct inode *dir, struct dentry *newdentry,
 
 static int ovl_set_opaque(struct dentry *upperdentry)
 {
-	return ovl_do_setxattr(upperdentry, ovl_opaque_xattr, "y", 1, 0);
+	return ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0);
 }
 
 static void ovl_remove_opaque(struct dentry *upperdentry)
 {
 	int err;
 
-	err = ovl_do_removexattr(upperdentry, ovl_opaque_xattr);
+	err = ovl_do_removexattr(upperdentry, OVL_XATTR_OPAQUE);
 	if (err) {
 		pr_warn("overlayfs: failed to remove opaque from '%s' (%i)\n",
 			upperdentry->d_name.name, err);
@@ -152,7 +152,7 @@ static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry,
 	 * correct link count. nlink=1 seems to pacify 'find' and
 	 * other utilities.
 	 */
-	if (type == OVL_PATH_MERGE)
+	if (OVL_TYPE_MERGE(type))
 		stat->nlink = 1;
 
 	return 0;
@@ -506,7 +506,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
 	struct dentry *opaquedir = NULL;
 	int err;
 
-	if (is_dir) {
+	if (is_dir && OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) {
 		opaquedir = ovl_check_empty_and_clear(dentry);
 		err = PTR_ERR(opaquedir);
 		if (IS_ERR(opaquedir))
@@ -630,7 +630,7 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
 		goto out_drop_write;
 
 	type = ovl_path_type(dentry);
-	if (type == OVL_PATH_PURE_UPPER) {
+	if (OVL_TYPE_PURE_UPPER(type)) {
 		err = ovl_remove_upper(dentry, is_dir);
 	} else {
 		const struct cred *old_cred;
@@ -693,7 +693,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
 	bool new_create = false;
 	bool cleanup_whiteout = false;
 	bool overwrite = !(flags & RENAME_EXCHANGE);
-	bool is_dir = S_ISDIR(old->d_inode->i_mode);
+	bool is_dir = d_is_dir(old);
 	bool new_is_dir = false;
 	struct dentry *opaquedir = NULL;
 	const struct cred *old_cred = NULL;
@@ -712,7 +712,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
 	/* Don't copy up directory trees */
 	old_type = ovl_path_type(old);
 	err = -EXDEV;
-	if ((old_type == OVL_PATH_LOWER || old_type == OVL_PATH_MERGE) && is_dir)
+	if (OVL_TYPE_MERGE_OR_LOWER(old_type) && is_dir)
 		goto out;
 
 	if (new->d_inode) {
@@ -720,30 +720,30 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
 		if (err)
 			goto out;
 
-		if (S_ISDIR(new->d_inode->i_mode))
+		if (d_is_dir(new))
 			new_is_dir = true;
 
 		new_type = ovl_path_type(new);
 		err = -EXDEV;
-		if (!overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir)
+		if (!overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir)
 			goto out;
 
 		err = 0;
-		if (new_type == OVL_PATH_LOWER && old_type == OVL_PATH_LOWER) {
+		if (!OVL_TYPE_UPPER(new_type) && !OVL_TYPE_UPPER(old_type)) {
 			if (ovl_dentry_lower(old)->d_inode ==
 			    ovl_dentry_lower(new)->d_inode)
 				goto out;
 		}
-		if (new_type != OVL_PATH_LOWER && old_type != OVL_PATH_LOWER) {
+		if (OVL_TYPE_UPPER(new_type) && OVL_TYPE_UPPER(old_type)) {
 			if (ovl_dentry_upper(old)->d_inode ==
 			    ovl_dentry_upper(new)->d_inode)
 				goto out;
 		}
 	} else {
 		if (ovl_dentry_is_opaque(new))
-			new_type = OVL_PATH_UPPER;
+			new_type = __OVL_PATH_UPPER;
 		else
-			new_type = OVL_PATH_PURE_UPPER;
+			new_type = __OVL_PATH_UPPER | __OVL_PATH_PURE;
 	}
 
 	err = ovl_want_write(old);
@@ -763,8 +763,8 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
 		goto out_drop_write;
 	}
 
-	old_opaque = old_type != OVL_PATH_PURE_UPPER;
-	new_opaque = new_type != OVL_PATH_PURE_UPPER;
+	old_opaque = !OVL_TYPE_PURE_UPPER(old_type);
+	new_opaque = !OVL_TYPE_PURE_UPPER(new_type);
 
 	if (old_opaque || new_opaque) {
 		err = -ENOMEM;
@@ -787,7 +787,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
 		old_cred = override_creds(override_cred);
 	}
 
-	if (overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir) {
+	if (overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) {
 		opaquedir = ovl_check_empty_and_clear(new);
 		err = PTR_ERR(opaquedir);
 		if (IS_ERR(opaquedir)) {
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 07d74b24913b..04f124884687 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -205,7 +205,7 @@ static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
 
 static bool ovl_is_private_xattr(const char *name)
 {
-	return strncmp(name, "trusted.overlay.", 14) == 0;
+	return strncmp(name, OVL_XATTR_PRE_NAME, OVL_XATTR_PRE_LEN) == 0;
 }
 
 int ovl_setxattr(struct dentry *dentry, const char *name,
@@ -238,7 +238,10 @@ out:
 static bool ovl_need_xattr_filter(struct dentry *dentry,
 				  enum ovl_path_type type)
 {
-	return type == OVL_PATH_UPPER && S_ISDIR(dentry->d_inode->i_mode);
+	if ((type & (__OVL_PATH_PURE | __OVL_PATH_UPPER)) == __OVL_PATH_UPPER)
+		return S_ISDIR(dentry->d_inode->i_mode);
+	else
+		return false;
 }
 
 ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
@@ -299,7 +302,7 @@ int ovl_removexattr(struct dentry *dentry, const char *name)
 	if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
 		goto out_drop_write;
 
-	if (type == OVL_PATH_LOWER) {
+	if (!OVL_TYPE_UPPER(type)) {
 		err = vfs_getxattr(realpath.dentry, name, NULL, 0);
 		if (err < 0)
 			goto out_drop_write;
@@ -321,7 +324,7 @@ out:
 static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type,
 				  struct dentry *realdentry)
 {
-	if (type != OVL_PATH_LOWER)
+	if (OVL_TYPE_UPPER(type))
 		return false;
 
 	if (special_file(realdentry->d_inode->i_mode))
@@ -430,5 +433,4 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
 	}
 
 	return inode;
-
 }
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 814bed33dd07..17ac5afc9ffb 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -12,13 +12,20 @@
 struct ovl_entry;
 
 enum ovl_path_type {
-	OVL_PATH_PURE_UPPER,
-	OVL_PATH_UPPER,
-	OVL_PATH_MERGE,
-	OVL_PATH_LOWER,
+	__OVL_PATH_PURE		= (1 << 0),
+	__OVL_PATH_UPPER	= (1 << 1),
+	__OVL_PATH_MERGE	= (1 << 2),
 };
 
-extern const char *ovl_opaque_xattr;
+#define OVL_TYPE_UPPER(type)	((type) & __OVL_PATH_UPPER)
+#define OVL_TYPE_MERGE(type)	((type) & __OVL_PATH_MERGE)
+#define OVL_TYPE_PURE_UPPER(type) ((type) & __OVL_PATH_PURE)
+#define OVL_TYPE_MERGE_OR_LOWER(type) \
+	(OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type))
+
+#define OVL_XATTR_PRE_NAME "trusted.overlay."
+#define OVL_XATTR_PRE_LEN  16
+#define OVL_XATTR_OPAQUE OVL_XATTR_PRE_NAME"opaque"
 
 static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry)
 {
@@ -130,6 +137,7 @@ void ovl_dentry_version_inc(struct dentry *dentry);
 void ovl_path_upper(struct dentry *dentry, struct path *path);
 void ovl_path_lower(struct dentry *dentry, struct path *path);
 enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
+int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
 struct dentry *ovl_dentry_upper(struct dentry *dentry);
 struct dentry *ovl_dentry_lower(struct dentry *dentry);
 struct dentry *ovl_dentry_real(struct dentry *dentry);
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index c0205990a9f5..907870e81a72 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -24,7 +24,6 @@ struct ovl_cache_entry {
 	struct list_head l_node;
 	struct rb_node node;
 	bool is_whiteout;
-	bool is_cursor;
 	char name[];
 };
 
@@ -40,6 +39,7 @@ struct ovl_readdir_data {
 	struct rb_root root;
 	struct list_head *list;
 	struct list_head middle;
+	struct dentry *dir;
 	int count;
 	int err;
 };
@@ -48,7 +48,7 @@ struct ovl_dir_file {
 	bool is_real;
 	bool is_upper;
 	struct ovl_dir_cache *cache;
-	struct ovl_cache_entry cursor;
+	struct list_head *cursor;
 	struct file *realfile;
 	struct file *upperfile;
 };
@@ -79,23 +79,49 @@ static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
 	return NULL;
 }
 
-static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len,
+static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir,
+						   const char *name, int len,
 						   u64 ino, unsigned int d_type)
 {
 	struct ovl_cache_entry *p;
 	size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);
 
 	p = kmalloc(size, GFP_KERNEL);
-	if (p) {
-		memcpy(p->name, name, len);
-		p->name[len] = '\0';
-		p->len = len;
-		p->type = d_type;
-		p->ino = ino;
-		p->is_whiteout = false;
-		p->is_cursor = false;
-	}
+	if (!p)
+		return NULL;
+
+	memcpy(p->name, name, len);
+	p->name[len] = '\0';
+	p->len = len;
+	p->type = d_type;
+	p->ino = ino;
+	p->is_whiteout = false;
+
+	if (d_type == DT_CHR) {
+		struct dentry *dentry;
+		const struct cred *old_cred;
+		struct cred *override_cred;
+
+		override_cred = prepare_creds();
+		if (!override_cred) {
+			kfree(p);
+			return NULL;
+		}
+
+		/*
+		 * CAP_DAC_OVERRIDE for lookup
+		 */
+		cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
+		old_cred = override_creds(override_cred);
 
+		dentry = lookup_one_len(name, dir, len);
+		if (!IS_ERR(dentry)) {
+			p->is_whiteout = ovl_is_whiteout(dentry);
+			dput(dentry);
+		}
+		revert_creds(old_cred);
+		put_cred(override_cred);
+	}
 	return p;
 }
 
@@ -122,7 +148,7 @@ static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
 		return 0;
 	}
 
-	p = ovl_cache_entry_new(name, len, ino, d_type);
+	p = ovl_cache_entry_new(rdd->dir, name, len, ino, d_type);
 	if (p == NULL)
 		return -ENOMEM;
 
@@ -143,7 +169,7 @@ static int ovl_fill_lower(struct ovl_readdir_data *rdd,
 	if (p) {
 		list_move_tail(&p->l_node, &rdd->middle);
 	} else {
-		p = ovl_cache_entry_new(name, namelen, ino, d_type);
+		p = ovl_cache_entry_new(rdd->dir, name, namelen, ino, d_type);
 		if (p == NULL)
 			rdd->err = -ENOMEM;
 		else
@@ -168,7 +194,6 @@ static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
 {
 	struct ovl_dir_cache *cache = od->cache;
 
-	list_del_init(&od->cursor.l_node);
 	WARN_ON(cache->refcount <= 0);
 	cache->refcount--;
 	if (!cache->refcount) {
@@ -204,6 +229,7 @@ static inline int ovl_dir_read(struct path *realpath,
 	if (IS_ERR(realfile))
 		return PTR_ERR(realfile);
 
+	rdd->dir = realpath->dentry;
 	rdd->ctx.pos = 0;
 	do {
 		rdd->count = 0;
@@ -227,108 +253,58 @@ static void ovl_dir_reset(struct file *file)
 	if (cache && ovl_dentry_version_get(dentry) != cache->version) {
 		ovl_cache_put(od, dentry);
 		od->cache = NULL;
+		od->cursor = NULL;
 	}
-	WARN_ON(!od->is_real && type != OVL_PATH_MERGE);
-	if (od->is_real && type == OVL_PATH_MERGE)
+	WARN_ON(!od->is_real && !OVL_TYPE_MERGE(type));
+	if (od->is_real && OVL_TYPE_MERGE(type))
 		od->is_real = false;
 }
 
-static int ovl_dir_mark_whiteouts(struct dentry *dir,
-				  struct ovl_readdir_data *rdd)
-{
-	struct ovl_cache_entry *p;
-	struct dentry *dentry;
-	const struct cred *old_cred;
-	struct cred *override_cred;
-
-	override_cred = prepare_creds();
-	if (!override_cred) {
-		ovl_cache_free(rdd->list);
-		return -ENOMEM;
-	}
-
-	/*
-	 * CAP_DAC_OVERRIDE for lookup
-	 */
-	cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
-	old_cred = override_creds(override_cred);
-
-	mutex_lock(&dir->d_inode->i_mutex);
-	list_for_each_entry(p, rdd->list, l_node) {
-		if (p->is_cursor)
-			continue;
-
-		if (p->type != DT_CHR)
-			continue;
-
-		dentry = lookup_one_len(p->name, dir, p->len);
-		if (IS_ERR(dentry))
-			continue;
-
-		p->is_whiteout = ovl_is_whiteout(dentry);
-		dput(dentry);
-	}
-	mutex_unlock(&dir->d_inode->i_mutex);
-
-	revert_creds(old_cred);
-	put_cred(override_cred);
-
-	return 0;
-}
-
 static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list)
 {
 	int err;
-	struct path lowerpath;
-	struct path upperpath;
+	struct path realpath;
 	struct ovl_readdir_data rdd = {
 		.ctx.actor = ovl_fill_merge,
 		.list = list,
 		.root = RB_ROOT,
 		.is_merge = false,
 	};
+	int idx, next;
 
-	ovl_path_lower(dentry, &lowerpath);
-	ovl_path_upper(dentry, &upperpath);
+	for (idx = 0; idx != -1; idx = next) {
+		next = ovl_path_next(idx, dentry, &realpath);
 
-	if (upperpath.dentry) {
-		err = ovl_dir_read(&upperpath, &rdd);
-		if (err)
-			goto out;
-
-		if (lowerpath.dentry) {
-			err = ovl_dir_mark_whiteouts(upperpath.dentry, &rdd);
+		if (next != -1) {
+			err = ovl_dir_read(&realpath, &rdd);
 			if (err)
-				goto out;
+				break;
+		} else {
+			/*
+			 * Insert lowest layer entries before upper ones, this
+			 * allows offsets to be reasonably constant
+			 */
+			list_add(&rdd.middle, rdd.list);
+			rdd.is_merge = true;
+			err = ovl_dir_read(&realpath, &rdd);
+			list_del(&rdd.middle);
 		}
 	}
-	if (lowerpath.dentry) {
-		/*
-		 * Insert lowerpath entries before upperpath ones, this allows
-		 * offsets to be reasonably constant
-		 */
-		list_add(&rdd.middle, rdd.list);
-		rdd.is_merge = true;
-		err = ovl_dir_read(&lowerpath, &rdd);
-		list_del(&rdd.middle);
-	}
-out:
 	return err;
 }
 
 static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
 {
-	struct ovl_cache_entry *p;
+	struct list_head *p;
 	loff_t off = 0;
 
-	list_for_each_entry(p, &od->cache->entries, l_node) {
-		if (p->is_cursor)
-			continue;
+	list_for_each(p, &od->cache->entries) {
 		if (off >= pos)
 			break;
 		off++;
 	}
-	list_move_tail(&od->cursor.l_node, &p->l_node);
+	/* Cursor is safe since the cache is stable */
+	od->cursor = p;
 }
 
 static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
@@ -367,6 +343,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
 {
 	struct ovl_dir_file *od = file->private_data;
 	struct dentry *dentry = file->f_path.dentry;
+	struct ovl_cache_entry *p;
 
 	if (!ctx->pos)
 		ovl_dir_reset(file);
@@ -385,19 +362,13 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
 		ovl_seek_cursor(od, ctx->pos);
 	}
 
-	while (od->cursor.l_node.next != &od->cache->entries) {
-		struct ovl_cache_entry *p;
-
-		p = list_entry(od->cursor.l_node.next, struct ovl_cache_entry, l_node);
-		/* Skip cursors */
-		if (!p->is_cursor) {
-			if (!p->is_whiteout) {
-				if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
-					break;
-			}
-			ctx->pos++;
-		}
-		list_move(&od->cursor.l_node, &p->l_node);
+	while (od->cursor != &od->cache->entries) {
+		p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
+		if (!p->is_whiteout)
+			if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
+				break;
+		od->cursor = p->l_node.next;
+		ctx->pos++;
 	}
 	return 0;
 }
@@ -452,7 +423,7 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
 	/*
 	 * Need to check if we started out being a lower dir, but got copied up
 	 */
-	if (!od->is_upper && ovl_path_type(dentry) != OVL_PATH_LOWER) {
+	if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
 		struct inode *inode = file_inode(file);
 
 		realfile = lockless_dereference(od->upperfile);
@@ -516,11 +487,9 @@ static int ovl_dir_open(struct inode *inode, struct file *file)
 		kfree(od);
 		return PTR_ERR(realfile);
 	}
-	INIT_LIST_HEAD(&od->cursor.l_node);
 	od->realfile = realfile;
-	od->is_real = (type != OVL_PATH_MERGE);
-	od->is_upper = (type != OVL_PATH_LOWER);
-	od->cursor.is_cursor = true;
+	od->is_real = !OVL_TYPE_MERGE(type);
+	od->is_upper = OVL_TYPE_UPPER(type);
 	file->private_data = od;
 
 	return 0;
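ovl_cache_entry_new() above sizes each entry with offsetof(struct ovl_cache_entry, name[len + 1]), so the fixed header and the flexible name[] array come from a single allocation. A self-contained userspace sketch of that pattern (the names here are illustrative, not kernel API; the variable-index offsetof is the same widely supported idiom the kernel relies on):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	size_t len;
	char name[];		/* flexible array member, sized at alloc time */
};

static struct entry *entry_new(const char *name, size_t len)
{
	/* header plus name plus trailing NUL, in one block */
	struct entry *p = malloc(offsetof(struct entry, name[len + 1]));

	if (!p)
		return NULL;
	memcpy(p->name, name, len);
	p->name[len] = '\0';	/* room for the NUL came from the offsetof() */
	p->len = len;
	return p;
}

int main(void)
{
	struct entry *e = entry_new("whiteout-test", 13);

	if (e) {
		printf("%zu: %s\n", e->len, e->name);
		free(e);	/* one free releases header and name together */
	}
	return 0;
}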
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index f16d318b71f8..b90952f528b1 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -35,7 +35,8 @@ struct ovl_config {
 /* private information held for overlayfs's superblock */
 struct ovl_fs {
 	struct vfsmount *upper_mnt;
-	struct vfsmount *lower_mnt;
+	unsigned numlower;
+	struct vfsmount **lower_mnt;
 	struct dentry *workdir;
 	long lower_namelen;
 	/* pathnames of lower and upper dirs, for show_options */
@@ -47,7 +48,6 @@ struct ovl_dir_cache;
 /* private information held for every overlayfs dentry */
 struct ovl_entry {
 	struct dentry *__upperdentry;
-	struct dentry *lowerdentry;
 	struct ovl_dir_cache *cache;
 	union {
 		struct {
@@ -56,30 +56,36 @@ struct ovl_entry {
 		};
 		struct rcu_head rcu;
 	};
+	unsigned numlower;
+	struct path lowerstack[];
 };
 
-const char *ovl_opaque_xattr = "trusted.overlay.opaque";
+#define OVL_MAX_STACK 500
 
+static struct dentry *__ovl_dentry_lower(struct ovl_entry *oe)
+{
+	return oe->numlower ? oe->lowerstack[0].dentry : NULL;
+}
 
 enum ovl_path_type ovl_path_type(struct dentry *dentry)
 {
 	struct ovl_entry *oe = dentry->d_fsdata;
+	enum ovl_path_type type = 0;
 
 	if (oe->__upperdentry) {
-		if (oe->lowerdentry) {
+		type = __OVL_PATH_UPPER;
+
+		if (oe->numlower) {
 			if (S_ISDIR(dentry->d_inode->i_mode))
-				return OVL_PATH_MERGE;
-			else
-				return OVL_PATH_UPPER;
-		} else {
-			if (oe->opaque)
-				return OVL_PATH_UPPER;
-			else
-				return OVL_PATH_PURE_UPPER;
+				type |= __OVL_PATH_MERGE;
+		} else if (!oe->opaque) {
+			type |= __OVL_PATH_PURE;
 		}
 	} else {
-		return OVL_PATH_LOWER;
+		if (oe->numlower > 1)
+			type |= __OVL_PATH_MERGE;
 	}
+	return type;
 }
 
 static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe)
@@ -98,10 +104,9 @@ void ovl_path_upper(struct dentry *dentry, struct path *path)
 
 enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path)
 {
-
 	enum ovl_path_type type = ovl_path_type(dentry);
 
-	if (type == OVL_PATH_LOWER)
+	if (!OVL_TYPE_UPPER(type))
 		ovl_path_lower(dentry, path);
 	else
 		ovl_path_upper(dentry, path);
@@ -120,7 +125,7 @@ struct dentry *ovl_dentry_lower(struct dentry *dentry)
 {
 	struct ovl_entry *oe = dentry->d_fsdata;
 
-	return oe->lowerdentry;
+	return __ovl_dentry_lower(oe);
 }
 
 struct dentry *ovl_dentry_real(struct dentry *dentry)
@@ -130,7 +135,7 @@ struct dentry *ovl_dentry_real(struct dentry *dentry)
 
 	realdentry = ovl_upperdentry_dereference(oe);
 	if (!realdentry)
-		realdentry = oe->lowerdentry;
+		realdentry = __ovl_dentry_lower(oe);
 
 	return realdentry;
 }
@@ -143,7 +148,7 @@ struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper)
 	if (realdentry) {
 		*is_upper = true;
 	} else {
-		realdentry = oe->lowerdentry;
+		realdentry = __ovl_dentry_lower(oe);
 		*is_upper = false;
 	}
 	return realdentry;
@@ -165,11 +170,9 @@ void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache)
 
 void ovl_path_lower(struct dentry *dentry, struct path *path)
 {
-	struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
 	struct ovl_entry *oe = dentry->d_fsdata;
 
-	path->mnt = ofs->lower_mnt;
-	path->dentry = oe->lowerdentry;
+	*path = oe->numlower ? oe->lowerstack[0] : (struct path) { NULL, NULL };
 }
 
 int ovl_want_write(struct dentry *dentry)
@@ -249,7 +252,7 @@ static bool ovl_is_opaquedir(struct dentry *dentry)
 	if (!S_ISDIR(inode->i_mode) || !inode->i_op->getxattr)
 		return false;
 
-	res = inode->i_op->getxattr(dentry, ovl_opaque_xattr, &val, 1);
+	res = inode->i_op->getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1);
 	if (res == 1 && val == 'y')
 		return true;
 
@@ -261,8 +264,11 @@ static void ovl_dentry_release(struct dentry *dentry)
 	struct ovl_entry *oe = dentry->d_fsdata;
 
 	if (oe) {
+		unsigned int i;
+
 		dput(oe->__upperdentry);
-		dput(oe->lowerdentry);
+		for (i = 0; i < oe->numlower; i++)
+			dput(oe->lowerstack[i].dentry);
 		kfree_rcu(oe, rcu);
 	}
 }
@@ -271,9 +277,15 @@ static const struct dentry_operations ovl_dentry_operations = {
 	.d_release = ovl_dentry_release,
 };
 
-static struct ovl_entry *ovl_alloc_entry(void)
+static struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
 {
-	return kzalloc(sizeof(struct ovl_entry), GFP_KERNEL);
+	size_t size = offsetof(struct ovl_entry, lowerstack[numlower]);
+	struct ovl_entry *oe = kzalloc(size, GFP_KERNEL);
+
+	if (oe)
+		oe->numlower = numlower;
+
+	return oe;
 }
 
 static inline struct dentry *ovl_lookup_real(struct dentry *dir,
@@ -295,82 +307,154 @@ static inline struct dentry *ovl_lookup_real(struct dentry *dir,
 	return dentry;
 }
 
+/*
+ * Returns next layer in stack starting from top.
+ * Returns -1 if this is the last layer.
+ */
+int ovl_path_next(int idx, struct dentry *dentry, struct path *path)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	BUG_ON(idx < 0);
+	if (idx == 0) {
+		ovl_path_upper(dentry, path);
+		if (path->dentry)
+			return oe->numlower ? 1 : -1;
+		idx++;
+	}
+	BUG_ON(idx > oe->numlower);
+	*path = oe->lowerstack[idx - 1];
+
+	return (idx < oe->numlower) ? idx + 1 : -1;
+}
+
 struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
 			  unsigned int flags)
 {
 	struct ovl_entry *oe;
-	struct dentry *upperdir;
-	struct dentry *lowerdir;
-	struct dentry *upperdentry = NULL;
-	struct dentry *lowerdentry = NULL;
+	struct ovl_entry *poe = dentry->d_parent->d_fsdata;
+	struct path *stack = NULL;
+	struct dentry *upperdir, *upperdentry = NULL;
+	unsigned int ctr = 0;
 	struct inode *inode = NULL;
+	bool upperopaque = false;
+	struct dentry *this, *prev = NULL;
+	unsigned int i;
 	int err;
 
-	err = -ENOMEM;
-	oe = ovl_alloc_entry();
-	if (!oe)
-		goto out;
-
-	upperdir = ovl_dentry_upper(dentry->d_parent);
-	lowerdir = ovl_dentry_lower(dentry->d_parent);
-
+	upperdir = ovl_upperdentry_dereference(poe);
 	if (upperdir) {
-		upperdentry = ovl_lookup_real(upperdir, &dentry->d_name);
-		err = PTR_ERR(upperdentry);
-		if (IS_ERR(upperdentry))
-			goto out_put_dir;
+		this = ovl_lookup_real(upperdir, &dentry->d_name);
+		err = PTR_ERR(this);
+		if (IS_ERR(this))
+			goto out;
 
-		if (lowerdir && upperdentry) {
-			if (ovl_is_whiteout(upperdentry)) {
-				dput(upperdentry);
-				upperdentry = NULL;
-				oe->opaque = true;
-			} else if (ovl_is_opaquedir(upperdentry)) {
-				oe->opaque = true;
+		if (this) {
+			if (ovl_is_whiteout(this)) {
+				dput(this);
+				this = NULL;
+				upperopaque = true;
+			} else if (poe->numlower && ovl_is_opaquedir(this)) {
+				upperopaque = true;
 			}
 		}
+		upperdentry = prev = this;
 	}
-	if (lowerdir && !oe->opaque) {
-		lowerdentry = ovl_lookup_real(lowerdir, &dentry->d_name);
-		err = PTR_ERR(lowerdentry);
-		if (IS_ERR(lowerdentry))
-			goto out_dput_upper;
+
+	if (!upperopaque && poe->numlower) {
+		err = -ENOMEM;
+		stack = kcalloc(poe->numlower, sizeof(struct path), GFP_KERNEL);
+		if (!stack)
+			goto out_put_upper;
 	}
 
-	if (lowerdentry && upperdentry &&
-	    (!S_ISDIR(upperdentry->d_inode->i_mode) ||
-	     !S_ISDIR(lowerdentry->d_inode->i_mode))) {
-		dput(lowerdentry);
-		lowerdentry = NULL;
-		oe->opaque = true;
+	for (i = 0; !upperopaque && i < poe->numlower; i++) {
+		bool opaque = false;
+		struct path lowerpath = poe->lowerstack[i];
+
+		this = ovl_lookup_real(lowerpath.dentry, &dentry->d_name);
+		err = PTR_ERR(this);
+		if (IS_ERR(this)) {
+			/*
+			 * If it's positive, then treat ENAMETOOLONG as ENOENT.
+			 */
+			if (err == -ENAMETOOLONG && (upperdentry || ctr))
+				continue;
+			goto out_put;
+		}
+		if (!this)
+			continue;
+		if (ovl_is_whiteout(this)) {
+			dput(this);
+			break;
+		}
+		/*
+		 * Only makes sense to check opaque dir if this is not the
+		 * lowermost layer.
+		 */
+		if (i < poe->numlower - 1 && ovl_is_opaquedir(this))
+			opaque = true;
+
+		if (prev && (!S_ISDIR(prev->d_inode->i_mode) ||
+			     !S_ISDIR(this->d_inode->i_mode))) {
+			/*
+			 * FIXME: check for upper-opaqueness maybe better done
+			 * in remove code.
+			 */
+			if (prev == upperdentry)
+				upperopaque = true;
+			dput(this);
+			break;
		}
+		/*
+		 * If this is a non-directory then stop here.
+		 */
+		if (!S_ISDIR(this->d_inode->i_mode))
+			opaque = true;
+
+		stack[ctr].dentry = this;
+		stack[ctr].mnt = lowerpath.mnt;
+		ctr++;
+		prev = this;
+		if (opaque)
+			break;
 	}
 
-	if (lowerdentry || upperdentry) {
+	oe = ovl_alloc_entry(ctr);
+	err = -ENOMEM;
+	if (!oe)
+		goto out_put;
+
+	if (upperdentry || ctr) {
 		struct dentry *realdentry;
 
-		realdentry = upperdentry ? upperdentry : lowerdentry;
+		realdentry = upperdentry ? upperdentry : stack[0].dentry;
+
 		err = -ENOMEM;
 		inode = ovl_new_inode(dentry->d_sb, realdentry->d_inode->i_mode,
 				      oe);
 		if (!inode)
-			goto out_dput;
+			goto out_free_oe;
 		ovl_copyattr(realdentry->d_inode, inode);
 	}
 
+	oe->opaque = upperopaque;
 	oe->__upperdentry = upperdentry;
-	oe->lowerdentry = lowerdentry;
-
+	memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr);
+	kfree(stack);
 	dentry->d_fsdata = oe;
 	d_add(dentry, inode);
 
 	return NULL;
 
-out_dput:
-	dput(lowerdentry);
-out_dput_upper:
-	dput(upperdentry);
-out_put_dir:
+out_free_oe:
 	kfree(oe);
+out_put:
+	for (i = 0; i < ctr; i++)
+		dput(stack[i].dentry);
+	kfree(stack);
+out_put_upper:
+	dput(upperdentry);
 out:
 	return ERR_PTR(err);
 }
@@ -383,10 +467,12 @@ struct file *ovl_path_open(struct path *path, int flags)
 static void ovl_put_super(struct super_block *sb)
 {
 	struct ovl_fs *ufs = sb->s_fs_info;
+	unsigned i;
 
 	dput(ufs->workdir);
 	mntput(ufs->upper_mnt);
-	mntput(ufs->lower_mnt);
+	for (i = 0; i < ufs->numlower; i++)
+		mntput(ufs->lower_mnt[i]);
 
 	kfree(ufs->config.lowerdir);
 	kfree(ufs->config.upperdir);
@@ -400,7 +486,7 @@ static void ovl_put_super(struct super_block *sb)
  * @buf: The struct kstatfs to fill in with stats
 *
 * Get the filesystem statistics.  As writes always target the upper layer
- * filesystem pass the statfs to the same filesystem.
+ * filesystem pass the statfs to the upper filesystem (if it exists)
 */
 static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
@@ -409,7 +495,7 @@ static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
 	struct path path;
 	int err;
 
-	ovl_path_upper(root_dentry, &path);
+	ovl_path_real(root_dentry, &path);
 
 	err = vfs_statfs(&path, buf);
 	if (!err) {
@@ -432,8 +518,21 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
 	struct ovl_fs *ufs = sb->s_fs_info;
 
 	seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir);
-	seq_printf(m, ",upperdir=%s", ufs->config.upperdir);
-	seq_printf(m, ",workdir=%s", ufs->config.workdir);
+	if (ufs->config.upperdir) {
+		seq_printf(m, ",upperdir=%s", ufs->config.upperdir);
+		seq_printf(m, ",workdir=%s", ufs->config.workdir);
+	}
+	return 0;
+}
+
+static int ovl_remount(struct super_block *sb, int *flags, char *data)
+{
+	struct ovl_fs *ufs = sb->s_fs_info;
+
+	if (!(*flags & MS_RDONLY) &&
+	    (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)))
+		return -EROFS;
+
 	return 0;
 }
 
@@ -441,6 +540,7 @@ static const struct super_operations ovl_super_operations = {
 	.put_super	= ovl_put_super,
 	.statfs		= ovl_statfs,
 	.show_options	= ovl_show_options,
+	.remount_fs	= ovl_remount,
 };
 
 enum {
@@ -585,24 +685,6 @@ static void ovl_unescape(char *s)
 	}
 }
 
-static int ovl_mount_dir(const char *name, struct path *path)
-{
-	int err;
-	char *tmp = kstrdup(name, GFP_KERNEL);
-
-	if (!tmp)
-		return -ENOMEM;
-
-	ovl_unescape(tmp);
-	err = kern_path(tmp, LOOKUP_FOLLOW, path);
-	if (err) {
-		pr_err("overlayfs: failed to resolve '%s': %i\n", tmp, err);
-		err = -EINVAL;
-	}
-	kfree(tmp);
-	return err;
-}
-
 static bool ovl_is_allowed_fs_type(struct dentry *root)
 {
 	const struct dentry_operations *dop = root->d_op;
@@ -622,6 +704,75 @@ static bool ovl_is_allowed_fs_type(struct dentry *root)
 	return true;
 }
 
+static int ovl_mount_dir_noesc(const char *name, struct path *path)
+{
+	int err = -EINVAL;
+
+	if (!*name) {
+		pr_err("overlayfs: empty lowerdir\n");
+		goto out;
+	}
+	err = kern_path(name, LOOKUP_FOLLOW, path);
+	if (err) {
+		pr_err("overlayfs: failed to resolve '%s': %i\n", name, err);
+		goto out;
+	}
+	err = -EINVAL;
+	if (!ovl_is_allowed_fs_type(path->dentry)) {
+		pr_err("overlayfs: filesystem on '%s' not supported\n", name);
+		goto out_put;
+	}
+	if (!S_ISDIR(path->dentry->d_inode->i_mode)) {
+		pr_err("overlayfs: '%s' not a directory\n", name);
+		goto out_put;
+	}
+	return 0;
+
+out_put:
+	path_put(path);
+out:
+	return err;
+}
+
+static int ovl_mount_dir(const char *name, struct path *path)
+{
+	int err = -ENOMEM;
+	char *tmp = kstrdup(name, GFP_KERNEL);
+
+	if (tmp) {
+		ovl_unescape(tmp);
+		err = ovl_mount_dir_noesc(tmp, path);
+		kfree(tmp);
+	}
+	return err;
+}
+
+static int ovl_lower_dir(const char *name, struct path *path, long *namelen,
+			 int *stack_depth)
+{
+	int err;
+	struct kstatfs statfs;
+
+	err = ovl_mount_dir_noesc(name, path);
+	if (err)
+		goto out;
+
+	err = vfs_statfs(path, &statfs);
+	if (err) {
+		pr_err("overlayfs: statfs failed on '%s'\n", name);
+		goto out_put;
+	}
+	*namelen = max(*namelen, statfs.f_namelen);
+	*stack_depth = max(*stack_depth, path->mnt->mnt_sb->s_stack_depth);
+
+	return 0;
+
+out_put:
+	path_put(path);
+out:
+	return err;
+}
+
 /* Workdir should not be subdir of upperdir and vice versa */
 static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir)
 {
@@ -634,16 +785,39 @@ static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir)
 	return ok;
 }
 
+static unsigned int ovl_split_lowerdirs(char *str)
+{
+	unsigned int ctr = 1;
+	char *s, *d;
+
+	for (s = d = str;; s++, d++) {
+		if (*s == '\\') {
+			s++;
+		} else if (*s == ':') {
+			*d = '\0';
+			ctr++;
+			continue;
+		}
+		*d = *s;
+		if (!*s)
+			break;
+	}
+	return ctr;
+}
+
 static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 {
-	struct path lowerpath;
-	struct path upperpath;
-	struct path workpath;
-	struct inode *root_inode;
+	struct path upperpath = { NULL, NULL };
+	struct path workpath = { NULL, NULL };
 	struct dentry *root_dentry;
 	struct ovl_entry *oe;
 	struct ovl_fs *ufs;
-	struct kstatfs statfs;
+	struct path *stack = NULL;
+	char *lowertmp;
+	char *lower;
+	unsigned int numlower;
+	unsigned int stacklen = 0;
+	unsigned int i;
 	int err;
 
 	err = -ENOMEM;
@@ -655,123 +829,135 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 	if (err)
 		goto out_free_config;
 
-	/* FIXME: workdir is not needed for a R/O mount */
 	err = -EINVAL;
-	if (!ufs->config.upperdir || !ufs->config.lowerdir ||
-	    !ufs->config.workdir) {
-		pr_err("overlayfs: missing upperdir or lowerdir or workdir\n");
+	if (!ufs->config.lowerdir) {
+		pr_err("overlayfs: missing 'lowerdir'\n");
 		goto out_free_config;
 	}
 
-	err = -ENOMEM;
-	oe = ovl_alloc_entry();
-	if (oe == NULL)
-		goto out_free_config;
-
-	err = ovl_mount_dir(ufs->config.upperdir, &upperpath);
-	if (err)
-		goto out_free_oe;
-
-	err = ovl_mount_dir(ufs->config.lowerdir, &lowerpath);
-	if (err)
-		goto out_put_upperpath;
+	sb->s_stack_depth = 0;
+	if (ufs->config.upperdir) {
+		/* FIXME: workdir is not needed for a R/O mount */
+		if (!ufs->config.workdir) {
+			pr_err("overlayfs: missing 'workdir'\n");
+			goto out_free_config;
+		}
 
-	err = ovl_mount_dir(ufs->config.workdir, &workpath);
-	if (err)
-		goto out_put_lowerpath;
+		err = ovl_mount_dir(ufs->config.upperdir, &upperpath);
+		if (err)
+			goto out_free_config;
 
-	err = -EINVAL;
-	if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) ||
-	    !S_ISDIR(lowerpath.dentry->d_inode->i_mode) ||
-	    !S_ISDIR(workpath.dentry->d_inode->i_mode)) {
-		pr_err("overlayfs: upperdir or lowerdir or workdir not a directory\n");
-		goto out_put_workpath;
-	}
+		err = ovl_mount_dir(ufs->config.workdir, &workpath);
+		if (err)
+			goto out_put_upperpath;
 
-	if (upperpath.mnt != workpath.mnt) {
-		pr_err("overlayfs: workdir and upperdir must reside under the same mount\n");
-		goto out_put_workpath;
-	}
-	if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) {
-		pr_err("overlayfs: workdir and upperdir must be separate subtrees\n");
-		goto out_put_workpath;
+		err = -EINVAL;
+		if (upperpath.mnt != workpath.mnt) {
+			pr_err("overlayfs: workdir and upperdir must reside under the same mount\n");
+			goto out_put_workpath;
+		}
+		if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) {
+			pr_err("overlayfs: workdir and upperdir must be separate subtrees\n");
+			goto out_put_workpath;
+		}
+		sb->s_stack_depth = upperpath.mnt->mnt_sb->s_stack_depth;
 	}
-
-	if (!ovl_is_allowed_fs_type(upperpath.dentry)) {
-		pr_err("overlayfs: filesystem of upperdir is not supported\n");
-		goto out_put_workpath;
-	}
+	err = -ENOMEM;
+	lowertmp = kstrdup(ufs->config.lowerdir, GFP_KERNEL);
+	if (!lowertmp)
+		goto out_put_workpath;
 
-	if (!ovl_is_allowed_fs_type(lowerpath.dentry)) {
-		pr_err("overlayfs: filesystem of lowerdir is not supported\n");
-		goto out_put_workpath;
-	}
+	err = -EINVAL;
+	stacklen = ovl_split_lowerdirs(lowertmp);
+	if (stacklen > OVL_MAX_STACK)
+		goto out_free_lowertmp;
+
+	stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL);
+	if (!stack)
+		goto out_free_lowertmp;
+
+	lower = lowertmp;
+	for (numlower = 0; numlower < stacklen; numlower++) {
+		err = ovl_lower_dir(lower, &stack[numlower],
+				    &ufs->lower_namelen, &sb->s_stack_depth);
+		if (err)
+			goto out_put_lowerpath;
 
-	err = vfs_statfs(&lowerpath, &statfs);
-	if (err) {
-		pr_err("overlayfs: statfs failed on lowerpath\n");
-		goto out_put_workpath;
+		lower = strchr(lower, '\0') + 1;
 	}
-	ufs->lower_namelen = statfs.f_namelen;
-
-	sb->s_stack_depth = max(upperpath.mnt->mnt_sb->s_stack_depth,
-				lowerpath.mnt->mnt_sb->s_stack_depth) + 1;
 
 	err = -EINVAL;
+	sb->s_stack_depth++;
 	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
 		pr_err("overlayfs: maximum fs stacking depth exceeded\n");
-		goto out_put_workpath;
+		goto out_put_lowerpath;
 	}
 
-	ufs->upper_mnt = clone_private_mount(&upperpath);
-	err = PTR_ERR(ufs->upper_mnt);
-	if (IS_ERR(ufs->upper_mnt)) {
-		pr_err("overlayfs: failed to clone upperpath\n");
-		goto out_put_workpath;
-	}
+	if (ufs->config.upperdir) {
+		ufs->upper_mnt = clone_private_mount(&upperpath);
+		err = PTR_ERR(ufs->upper_mnt);
+		if (IS_ERR(ufs->upper_mnt)) {
+			pr_err("overlayfs: failed to clone upperpath\n");
			goto out_put_lowerpath;
+		}
 
-	ufs->lower_mnt = clone_private_mount(&lowerpath);
-	err = PTR_ERR(ufs->lower_mnt);
-	if (IS_ERR(ufs->lower_mnt)) {
-		pr_err("overlayfs: failed to clone lowerpath\n");
-		goto out_put_upper_mnt;
+		ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry);
+		err = PTR_ERR(ufs->workdir);
+		if (IS_ERR(ufs->workdir)) {
+			pr_err("overlayfs: failed to create directory %s/%s\n",
+			       ufs->config.workdir, OVL_WORKDIR_NAME);
+			goto out_put_upper_mnt;
+		}
 	}
 
-	ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry);
-	err = PTR_ERR(ufs->workdir);
-	if (IS_ERR(ufs->workdir)) {
-		pr_err("overlayfs: failed to create directory %s/%s\n",
-		       ufs->config.workdir, OVL_WORKDIR_NAME);
-		goto out_put_lower_mnt;
-	}
+	err = -ENOMEM;
+	ufs->lower_mnt = kcalloc(numlower, sizeof(struct vfsmount *), GFP_KERNEL);
+	if (ufs->lower_mnt == NULL)
+		goto out_put_workdir;
+	for (i = 0; i < numlower; i++) {
+		struct vfsmount *mnt = clone_private_mount(&stack[i]);
 
-	/*
-	 * Make lower_mnt R/O. That way fchmod/fchown on lower file
-	 * will fail instead of modifying lower fs.
-	 */
-	ufs->lower_mnt->mnt_flags |= MNT_READONLY;
+		err = PTR_ERR(mnt);
+		if (IS_ERR(mnt)) {
+			pr_err("overlayfs: failed to clone lowerpath\n");
+			goto out_put_lower_mnt;
+		}
+		/*
+		 * Make lower_mnt R/O. That way fchmod/fchown on lower file
+		 * will fail instead of modifying lower fs.
+		 */
+		mnt->mnt_flags |= MNT_READONLY;
+
+		ufs->lower_mnt[ufs->numlower] = mnt;
+		ufs->numlower++;
+	}
 
-	/* If the upper fs is r/o, we mark overlayfs r/o too */
-	if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)
+	/* If the upper fs is r/o or nonexistent, we mark overlayfs r/o too */
+	if (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY))
 		sb->s_flags |= MS_RDONLY;
 
 	sb->s_d_op = &ovl_dentry_operations;
 
 	err = -ENOMEM;
-	root_inode = ovl_new_inode(sb, S_IFDIR, oe);
-	if (!root_inode)
-		goto out_put_workdir;
+	oe = ovl_alloc_entry(numlower);
+	if (!oe)
+		goto out_put_lower_mnt;
 
-	root_dentry = d_make_root(root_inode);
+	root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, oe));
 	if (!root_dentry)
-		goto out_put_workdir;
+		goto out_free_oe;
 
 	mntput(upperpath.mnt);
-	mntput(lowerpath.mnt);
+	for (i = 0; i < numlower; i++)
+		mntput(stack[i].mnt);
 	path_put(&workpath);
+	kfree(lowertmp);
 
 	oe->__upperdentry = upperpath.dentry;
-	oe->lowerdentry = lowerpath.dentry;
+	for (i = 0; i < numlower; i++) {
+		oe->lowerstack[i].dentry = stack[i].dentry;
+		oe->lowerstack[i].mnt = ufs->lower_mnt[i];
+	}
 
 	root_dentry->d_fsdata = oe;
 
@@ -782,20 +968,26 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 
 	return 0;
 
+out_free_oe:
+	kfree(oe);
+out_put_lower_mnt:
+	for (i = 0; i < ufs->numlower; i++)
+		mntput(ufs->lower_mnt[i]);
+	kfree(ufs->lower_mnt);
 out_put_workdir:
 	dput(ufs->workdir);
-out_put_lower_mnt:
-	mntput(ufs->lower_mnt);
 out_put_upper_mnt:
 	mntput(ufs->upper_mnt);
+out_put_lowerpath:
+	for (i = 0; i < numlower; i++)
+		path_put(&stack[i]);
+	kfree(stack);
+out_free_lowertmp:
+	kfree(lowertmp);
 out_put_workpath:
 	path_put(&workpath);
-out_put_lowerpath:
-	path_put(&lowerpath);
 out_put_upperpath:
 	path_put(&upperpath);
-out_free_oe:
-	kfree(oe);
 out_free_config:
 	kfree(ufs->config.lowerdir);
 	kfree(ufs->config.upperdir);
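ovl_split_lowerdirs() above cuts the lowerdir option at each unescaped ':' in place and returns the segment count; ovl_fill_super() then walks the segments with strchr(lower, '\0') + 1. A userspace sketch of the same in-place split, with "\:" escaping a literal colon (illustrative only, mirroring the copy-compaction from s to d):

#include <stdio.h>
#include <string.h>

static unsigned int split_lowerdirs(char *str)
{
	unsigned int ctr = 1;
	char *s, *d;

	for (s = d = str;; s++, d++) {
		if (*s == '\\') {
			s++;			/* keep the escaped character */
		} else if (*s == ':') {
			*d = '\0';		/* terminate this segment */
			ctr++;
			continue;
		}
		*d = *s;
		if (!*s)
			break;
	}
	return ctr;
}

int main(void)
{
	char opt[] = "/lower1:/low\\:er2:/lower3";	/* "\:" is a literal colon */
	unsigned int i, n = split_lowerdirs(opt);
	char *p = opt;

	for (i = 0; i < n; i++) {
		printf("layer %u: %s\n", i, p);
		p = strchr(p, '\0') + 1;	/* next segment, as fill_super does */
	}
	return 0;
}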
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 0855f772cd41..3a48bb789c9f 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -564,13 +564,11 @@ posix_acl_create(struct inode *dir, umode_t *mode,
 
 	*acl = posix_acl_clone(p, GFP_NOFS);
 	if (!*acl)
-		return -ENOMEM;
+		goto no_mem;
 
 	ret = posix_acl_create_masq(*acl, mode);
-	if (ret < 0) {
-		posix_acl_release(*acl);
-		return -ENOMEM;
-	}
+	if (ret < 0)
+		goto no_mem_clone;
 
 	if (ret == 0) {
 		posix_acl_release(*acl);
@@ -591,6 +589,12 @@ no_acl:
 	*default_acl = NULL;
 	*acl = NULL;
 	return 0;
+
+no_mem_clone:
+	posix_acl_release(*acl);
+no_mem:
+	posix_acl_release(p);
+	return -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(posix_acl_create);
 
@@ -772,7 +776,7 @@ posix_acl_xattr_get(struct dentry *dentry, const char *name,
 
 	if (!IS_POSIXACL(dentry->d_inode))
 		return -EOPNOTSUPP;
-	if (S_ISLNK(dentry->d_inode->i_mode))
+	if (d_is_symlink(dentry))
 		return -EOPNOTSUPP;
 
 	acl = get_acl(dentry->d_inode, type);
@@ -832,7 +836,7 @@ posix_acl_xattr_list(struct dentry *dentry, char *list, size_t list_size,
 
 	if (!IS_POSIXACL(dentry->d_inode))
 		return -EOPNOTSUPP;
-	if (S_ISLNK(dentry->d_inode->i_mode))
+	if (d_is_symlink(dentry))
 		return -EOPNOTSUPP;
 
 	if (type == ACL_TYPE_ACCESS)
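The posix_acl_create() hunk above replaces duplicated release-and-return sequences with two goto labels, so each failure path unwinds exactly the resources acquired so far. A minimal sketch of that cleanup shape, with made-up names:

#include <errno.h>
#include <stdlib.h>

struct acl { int mode; };

static void acl_release(struct acl *a) { free(a); }
static struct acl *acl_clone(void) { return calloc(1, sizeof(struct acl)); }

static int create_pair(struct acl **def, struct acl **access)
{
	*def = acl_clone();
	if (!*def)
		goto no_mem;

	*access = acl_clone();
	if (!*access)
		goto no_mem_clone;

	return 0;

no_mem_clone:
	acl_release(*def);	/* undo only the first allocation */
no_mem:
	return -ENOMEM;		/* single place computes the error */
}

int main(void)
{
	struct acl *d, *a;

	return create_pair(&d, &a) ? 1 : 0;
}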
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 3309f59d421b..be65b2082135 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -19,7 +19,6 @@
 #include <linux/mount.h>
 #include <linux/init.h>
 #include <linux/idr.h>
-#include <linux/namei.h>
 #include <linux/bitops.h>
 #include <linux/spinlock.h>
 #include <linux/completion.h>
@@ -223,17 +222,6 @@ void proc_free_inum(unsigned int inum)
 	spin_unlock_irqrestore(&proc_inum_lock, flags);
 }
 
-static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-	nd_set_link(nd, __PDE_DATA(dentry->d_inode));
-	return NULL;
-}
-
-static const struct inode_operations proc_link_inode_operations = {
-	.readlink	= generic_readlink,
-	.follow_link	= proc_follow_link,
-};
-
 /*
  * Don't create negative dentries here, return -ENOENT by hand
  * instead.
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 13a50a32652d..7697b6621cfd 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -23,6 +23,7 @@
 #include <linux/slab.h>
 #include <linux/mount.h>
 #include <linux/magic.h>
+#include <linux/namei.h>
 
 #include <asm/uaccess.h>
 
@@ -393,6 +394,26 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
 };
 #endif
 
+static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	struct proc_dir_entry *pde = PDE(dentry->d_inode);
+	if (unlikely(!use_pde(pde)))
+		return ERR_PTR(-EINVAL);
+	nd_set_link(nd, pde->data);
+	return pde;
+}
+
+static void proc_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+{
+	unuse_pde(p);
+}
+
+const struct inode_operations proc_link_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= proc_follow_link,
+	.put_link	= proc_put_link,
+};
+
 struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
 {
 	struct inode *inode = new_inode_pseudo(sb);
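The new proc_follow_link() pins the proc_dir_entry with use_pde() and hands it back as a cookie that proc_put_link() later releases via unuse_pde(), so the link body cannot disappear while the VFS is using it. A hedged userspace analog of that acquire-in-follow, release-in-put pairing (illustrative only, not the VFS API):

#include <stdio.h>

struct pde {
	int in_use;		/* stand-in for the PDE use count */
	const char *target;
};

static struct pde *follow(struct pde *pde, const char **link)
{
	pde->in_use++;		/* pin: target stays valid while in use */
	*link = pde->target;
	return pde;		/* cookie handed to put() later */
}

static void put(void *cookie)
{
	((struct pde *)cookie)->in_use--;	/* unpin */
}

int main(void)
{
	struct pde p = { 0, "/proc/self/fd" };
	const char *link;
	void *cookie = follow(&p, &link);

	printf("%s (in_use=%d)\n", link, p.in_use);
	put(cookie);
	printf("after put: in_use=%d\n", p.in_use);
	return 0;
}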
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 6fcdba573e0f..c835b94c0cd3 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -200,6 +200,7 @@ struct pde_opener {
 	int closing;
 	struct completion *c;
 };
+extern const struct inode_operations proc_link_inode_operations;
 
 extern const struct inode_operations proc_pid_link_inode_operations;
 
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 04b06146bae2..4e781e697c90 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -266,7 +266,7 @@ static int reiserfs_for_each_xattr(struct inode *inode,
 	for (i = 0; !err && i < buf.count && buf.dentries[i]; i++) {
 		struct dentry *dentry = buf.dentries[i];
 
-		if (!S_ISDIR(dentry->d_inode->i_mode))
+		if (!d_is_dir(dentry))
 			err = action(dentry, data);
 
 		dput(dentry);
@@ -322,7 +322,7 @@ static int delete_one_xattr(struct dentry *dentry, void *data)
 	struct inode *dir = dentry->d_parent->d_inode;
 
 	/* This is the xattr dir, handle specially. */
-	if (S_ISDIR(dentry->d_inode->i_mode))
+	if (d_is_dir(dentry))
 		return xattr_rmdir(dir, dentry);
 
 	return xattr_unlink(dir, dentry);
diff --git a/fs/super.c b/fs/super.c
index 65a53efc1cf4..2b7dc90ccdbb 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -71,7 +71,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
 	if (!(sc->gfp_mask & __GFP_FS))
 		return SHRINK_STOP;
 
-	if (!grab_super_passive(sb))
+	if (!trylock_super(sb))
 		return SHRINK_STOP;
 
 	if (sb->s_op->nr_cached_objects)
@@ -105,7 +105,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
 		freed += sb->s_op->free_cached_objects(sb, sc);
 	}
 
-	drop_super(sb);
+	up_read(&sb->s_umount);
 	return freed;
 }
 
@@ -118,7 +118,7 @@ static unsigned long super_cache_count(struct shrinker *shrink,
 	sb = container_of(shrink, struct super_block, s_shrink);
 
 	/*
-	 * Don't call grab_super_passive as it is a potential
+	 * Don't call trylock_super as it is a potential
 	 * scalability bottleneck. The counts could get updated
 	 * between super_cache_count and super_cache_scan anyway.
 	 * Call to super_cache_count with shrinker_rwsem held
@@ -348,35 +348,31 @@ static int grab_super(struct super_block *s) __releases(sb_lock)
 }
 
 /*
- *	grab_super_passive - acquire a passive reference
+ *	trylock_super - try to grab ->s_umount shared
 *	@sb: reference we are trying to grab
 *
- *	Tries to acquire a passive reference. This is used in places where we
+ *	Try to prevent fs shutdown.  This is used in places where we
 *	cannot take an active reference but we need to ensure that the
- *	superblock does not go away while we are working on it. It returns
- *	false if a reference was not gained, and returns true with the s_umount
- *	lock held in read mode if a reference is gained. On successful return,
- *	the caller must drop the s_umount lock and the passive reference when
- *	done.
+ *	filesystem is not shut down while we are working on it. It returns
+ *	false if we cannot acquire s_umount or if we lose the race and
+ *	filesystem already got into shutdown, and returns true with the s_umount
+ *	lock held in read mode in case of success. On successful return,
+ *	the caller must drop the s_umount lock when done.
+ *
+ *	Note that unlike get_super() et.al. this one does *not* bump ->s_count.
+ *	The reason why it's safe is that we are OK with doing trylock instead
+ *	of down_read().  There's a couple of places that are OK with that, but
+ *	it's very much not a general-purpose interface.
 */
-bool grab_super_passive(struct super_block *sb)
+bool trylock_super(struct super_block *sb)
 {
-	spin_lock(&sb_lock);
-	if (hlist_unhashed(&sb->s_instances)) {
-		spin_unlock(&sb_lock);
-		return false;
-	}
-
-	sb->s_count++;
-	spin_unlock(&sb_lock);
-
 	if (down_read_trylock(&sb->s_umount)) {
-		if (sb->s_root && (sb->s_flags & MS_BORN))
+		if (!hlist_unhashed(&sb->s_instances) &&
+		    sb->s_root && (sb->s_flags & MS_BORN))
 			return true;
 		up_read(&sb->s_umount);
 	}
 
-	put_super(sb);
 	return false;
 }
 
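trylock_super() above takes s_umount opportunistically and only then checks that the superblock is still hashed and fully born, dropping the lock again on any doubt. A rough userspace sketch of that trylock-then-revalidate shape, with a pthread rwlock standing in for s_umount and two booleans standing in for the s_instances/MS_BORN checks:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct super {
	pthread_rwlock_t umount;
	bool hashed;		/* still on the superblock list */
	bool born;		/* fully set up */
};

static bool trylock_super(struct super *sb)
{
	if (pthread_rwlock_tryrdlock(&sb->umount) == 0) {
		if (sb->hashed && sb->born)
			return true;	/* caller now holds a read lock */
		pthread_rwlock_unlock(&sb->umount);
	}
	return false;		/* busy, dying, or not yet born */
}

int main(void)
{
	struct super sb = { PTHREAD_RWLOCK_INITIALIZER, true, true };

	if (trylock_super(&sb)) {
		printf("locked and live\n");
		pthread_rwlock_unlock(&sb.umount);
	}
	return 0;
}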
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index d61799949580..df6828570e87 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -121,3 +121,4 @@ xfs-$(CONFIG_XFS_POSIX_ACL)	+= xfs_acl.o
 xfs-$(CONFIG_PROC_FS)		+= xfs_stats.o
 xfs-$(CONFIG_SYSCTL)		+= xfs_sysctl.o
 xfs-$(CONFIG_COMPAT)		+= xfs_ioctl32.o
+xfs-$(CONFIG_NFSD_PNFS)		+= xfs_pnfs.o
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index 5eb4a14e0a0f..b97359ba2648 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -30,6 +30,7 @@
 #include "xfs_trace.h"
 #include "xfs_icache.h"
 #include "xfs_log.h"
+#include "xfs_pnfs.h"
 
 /*
  * Note that we only accept fileids which are long enough rather than allow
@@ -245,4 +246,9 @@ const struct export_operations xfs_export_operations = {
 	.fh_to_parent		= xfs_fs_fh_to_parent,
 	.get_parent		= xfs_fs_get_parent,
 	.commit_metadata	= xfs_fs_nfs_commit_metadata,
+#ifdef CONFIG_NFSD_PNFS
+	.get_uuid		= xfs_fs_get_uuid,
+	.map_blocks		= xfs_fs_map_blocks,
+	.commit_blocks		= xfs_fs_commit_blocks,
+#endif
 };
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 1cdba95c78cb..a2e1cb8a568b 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -36,6 +36,7 @@
 #include "xfs_trace.h"
 #include "xfs_log.h"
 #include "xfs_icache.h"
+#include "xfs_pnfs.h"
 
 #include <linux/aio.h>
 #include <linux/dcache.h>
@@ -396,7 +397,8 @@ STATIC int				/* error (positive) */
 xfs_zero_last_block(
 	struct xfs_inode	*ip,
 	xfs_fsize_t		offset,
-	xfs_fsize_t		isize)
+	xfs_fsize_t		isize,
+	bool			*did_zeroing)
 {
 	struct xfs_mount	*mp = ip->i_mount;
 	xfs_fileoff_t		last_fsb = XFS_B_TO_FSBT(mp, isize);
@@ -424,6 +426,7 @@ xfs_zero_last_block(
 	zero_len = mp->m_sb.sb_blocksize - zero_offset;
 	if (isize + zero_len > offset)
 		zero_len = offset - isize;
+	*did_zeroing = true;
 	return xfs_iozero(ip, isize, zero_len);
 }
 
@@ -442,7 +445,8 @@ int					/* error (positive) */
 xfs_zero_eof(
 	struct xfs_inode	*ip,
 	xfs_off_t		offset,		/* starting I/O offset */
-	xfs_fsize_t		isize)		/* current inode size */
+	xfs_fsize_t		isize,		/* current inode size */
+	bool			*did_zeroing)
 {
 	struct xfs_mount	*mp = ip->i_mount;
 	xfs_fileoff_t		start_zero_fsb;
@@ -464,7 +468,7 @@ xfs_zero_eof(
 	 * We only zero a part of that block so it is handled specially.
 	 */
 	if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
-		error = xfs_zero_last_block(ip, offset, isize);
+		error = xfs_zero_last_block(ip, offset, isize, did_zeroing);
 		if (error)
 			return error;
 	}
@@ -524,6 +528,7 @@ xfs_zero_eof(
 		if (error)
 			return error;
 
+		*did_zeroing = true;
 		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
 		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
 	}
@@ -554,6 +559,10 @@ restart:
 	if (error)
 		return error;
 
+	error = xfs_break_layouts(inode, iolock);
+	if (error)
+		return error;
+
 	/*
 	 * If the offset is beyond the size of the file, we need to zero any
 	 * blocks that fall between the existing EOF and the start of this
@@ -562,13 +571,15 @@ restart:
 	 * having to redo all checks before.
 	 */
 	if (*pos > i_size_read(inode)) {
+		bool	zero = false;
+
 		if (*iolock == XFS_IOLOCK_SHARED) {
 			xfs_rw_iunlock(ip, *iolock);
 			*iolock = XFS_IOLOCK_EXCL;
 			xfs_rw_ilock(ip, *iolock);
 			goto restart;
 		}
-		error = xfs_zero_eof(ip, *pos, i_size_read(inode));
+		error = xfs_zero_eof(ip, *pos, i_size_read(inode), &zero);
 		if (error)
 			return error;
 	}
@@ -822,6 +833,7 @@ xfs_file_fallocate(
 	struct xfs_inode	*ip = XFS_I(inode);
 	long			error;
 	enum xfs_prealloc_flags	flags = 0;
+	uint			iolock = XFS_IOLOCK_EXCL;
 	loff_t			new_size = 0;
 
 	if (!S_ISREG(inode->i_mode))
@@ -830,7 +842,11 @@ xfs_file_fallocate(
 		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
 		return -EOPNOTSUPP;
 
-	xfs_ilock(ip, XFS_IOLOCK_EXCL);
+	xfs_ilock(ip, iolock);
+	error = xfs_break_layouts(inode, &iolock);
+	if (error)
+		goto out_unlock;
+
 	if (mode & FALLOC_FL_PUNCH_HOLE) {
 		error = xfs_free_file_space(ip, offset, len);
 		if (error)
@@ -894,7 +910,7 @@ xfs_file_fallocate(
 	}
 
 out_unlock:
-	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+	xfs_iunlock(ip, iolock);
 	return error;
 }
 
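The did_zeroing flag threaded through xfs_zero_last_block() and xfs_zero_eof() above lets a helper that may or may not touch data tell its caller whether it actually did any work, so follow-up steps can be skipped when nothing changed. A small sketch of the out-parameter pattern (the names are made up):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Zero everything past old_end; report whether any bytes were touched. */
static int zero_tail(char *buf, size_t size, size_t old_end, bool *did_zeroing)
{
	if (old_end >= size)
		return 0;		/* nothing beyond the old end */

	memset(buf + old_end, 0, size - old_end);
	*did_zeroing = true;		/* only set when work was done */
	return 0;
}

int main(void)
{
	char buf[16] = "abcdefgh";
	bool did_zeroing = false;

	zero_tail(buf, sizeof(buf), 8, &did_zeroing);
	printf("zeroed: %s\n", did_zeroing ? "yes" : "no");
	return 0;
}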
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index fba6532efba4..74efe5b760dc 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -602,6 +602,12 @@ xfs_growfs_data(
 	if (!mutex_trylock(&mp->m_growlock))
 		return -EWOULDBLOCK;
 	error = xfs_growfs_data_private(mp, in);
+	/*
+	 * Increment the generation unconditionally, the error could be from
+	 * updating the secondary superblocks, in which case the new size
+	 * is live already.
+	 */
+	mp->m_generation++;
 	mutex_unlock(&mp->m_growlock);
 	return error;
 }
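m_generation above is bumped under the grow lock even on error, because a partial failure may still have changed the live size. A sketch of the generation-counter idea: writers bump it whenever observed state may have changed, and readers compare a remembered value to detect staleness (illustrative, not the XFS code):

#include <stdbool.h>
#include <stdio.h>

struct mount {
	unsigned int generation;
};

static void grow(struct mount *mp)
{
	/* ...resize happens here, under the grow lock... */
	mp->generation++;	/* unconditional: size may be live already */
}

static bool cache_stale(const struct mount *mp, unsigned int seen)
{
	return mp->generation != seen;
}

int main(void)
{
	struct mount m = { 0 };
	unsigned int seen = m.generation;	/* reader snapshots */

	grow(&m);
	printf("stale=%d\n", cache_stale(&m, seen));
	return 0;
}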
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index daafa1f6d260..6163767aa856 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2867,6 +2867,10 @@ xfs_rename(
 	 * Handle RENAME_EXCHANGE flags
 	 */
 	if (flags & RENAME_EXCHANGE) {
+		if (target_ip == NULL) {
+			error = -EINVAL;
+			goto error_return;
+		}
 		error = xfs_cross_rename(tp, src_dp, src_name, src_ip,
 					 target_dp, target_name, target_ip,
 					 &free_list, &first_block, spaceres);
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 86cd6b39bed7..a1cd55f3f351 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -384,10 +384,11 @@ enum xfs_prealloc_flags {
 	XFS_PREALLOC_INVISIBLE	= (1 << 4),
 };
 
-int	xfs_update_prealloc_flags(struct xfs_inode *,
-		enum xfs_prealloc_flags);
-int	xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t);
-int	xfs_iozero(struct xfs_inode *, loff_t, size_t);
+int	xfs_update_prealloc_flags(struct xfs_inode *ip,
+		enum xfs_prealloc_flags flags);
+int	xfs_zero_eof(struct xfs_inode *ip, xfs_off_t offset,
+		xfs_fsize_t isize, bool *did_zeroing);
+int	xfs_iozero(struct xfs_inode *ip, loff_t pos, size_t count);
 
 
 #define IHOLD(ip) \
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index f7afb86c9148..ac4feae45eb3 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -39,6 +39,7 @@
 #include "xfs_icache.h"
 #include "xfs_symlink.h"
 #include "xfs_trans.h"
+#include "xfs_pnfs.h"
 
 #include <linux/capability.h>
 #include <linux/dcache.h>
@@ -286,7 +287,7 @@ xfs_readlink_by_handle(
 		return PTR_ERR(dentry);
 
 	/* Restrict this handle operation to symlinks only. */
-	if (!S_ISLNK(dentry->d_inode->i_mode)) {
+	if (!d_is_symlink(dentry)) {
 		error = -EINVAL;
 		goto out_dput;
 	}
@@ -608,6 +609,7 @@ xfs_ioc_space(
 {
 	struct iattr		iattr;
 	enum xfs_prealloc_flags	flags = 0;
+	uint			iolock = XFS_IOLOCK_EXCL;
 	int			error;
 
 	/*
@@ -636,7 +638,10 @@ xfs_ioc_space(
 	if (error)
 		return error;
 
-	xfs_ilock(ip, XFS_IOLOCK_EXCL);
+	xfs_ilock(ip, iolock);
+	error = xfs_break_layouts(inode, &iolock);
+	if (error)
+		goto out_unlock;
 
 	switch (bf->l_whence) {
 	case 0: /*SEEK_SET*/
@@ -725,7 +730,7 @@ xfs_ioc_space(
 	error = xfs_update_prealloc_flags(ip, flags);
 
 out_unlock:
-	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+	xfs_iunlock(ip, iolock);
 	mnt_drop_write_file(filp);
 	return error;
 }
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index ce80eeb8faa4..e53a90331422 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -37,6 +37,7 @@
37#include "xfs_da_btree.h" 37#include "xfs_da_btree.h"
38#include "xfs_dir2.h" 38#include "xfs_dir2.h"
39#include "xfs_trans_space.h" 39#include "xfs_trans_space.h"
40#include "xfs_pnfs.h"
40 41
41#include <linux/capability.h> 42#include <linux/capability.h>
42#include <linux/xattr.h> 43#include <linux/xattr.h>
@@ -505,7 +506,7 @@ xfs_setattr_mode(
505 inode->i_mode |= mode & ~S_IFMT; 506 inode->i_mode |= mode & ~S_IFMT;
506} 507}
507 508
508static void 509void
509xfs_setattr_time( 510xfs_setattr_time(
510 struct xfs_inode *ip, 511 struct xfs_inode *ip,
511 struct iattr *iattr) 512 struct iattr *iattr)
@@ -750,6 +751,7 @@ xfs_setattr_size(
750 int error; 751 int error;
751 uint lock_flags = 0; 752 uint lock_flags = 0;
752 uint commit_flags = 0; 753 uint commit_flags = 0;
754 bool did_zeroing = false;
753 755
754 trace_xfs_setattr(ip); 756 trace_xfs_setattr(ip);
755 757
@@ -793,20 +795,16 @@ xfs_setattr_size(
793 return error; 795 return error;
794 796
795 /* 797 /*
796 * Now we can make the changes. Before we join the inode to the 798 * File data changes must be complete before we start the transaction to
797 * transaction, take care of the part of the truncation that must be 799 * modify the inode. This needs to be done before joining the inode to
798 * done without the inode lock. This needs to be done before joining 800 * the transaction because the inode cannot be unlocked once it is a
799 * the inode to the transaction, because the inode cannot be unlocked 801 * part of the transaction.
800 * once it is a part of the transaction. 802 *
803 * Start with zeroing any data block beyond EOF that we may expose on
804 * file extension.
801 */ 805 */
802 if (newsize > oldsize) { 806 if (newsize > oldsize) {
803 /* 807 error = xfs_zero_eof(ip, newsize, oldsize, &did_zeroing);
804 * Do the first part of growing a file: zero any data in the
805 * last block that is beyond the old EOF. We need to do this
806 * before the inode is joined to the transaction to modify
807 * i_size.
808 */
809 error = xfs_zero_eof(ip, newsize, oldsize);
810 if (error) 808 if (error)
811 return error; 809 return error;
812 } 810 }
@@ -816,23 +814,18 @@ xfs_setattr_size(
816 * any previous writes that are beyond the on disk EOF and the new 814 * any previous writes that are beyond the on disk EOF and the new
817 * EOF that have not been written out need to be written here. If we 815 * EOF that have not been written out need to be written here. If we
818 * do not write the data out, we expose ourselves to the null files 816 * do not write the data out, we expose ourselves to the null files
819 * problem. 817 * problem. Note that this includes any block zeroing we did above;
820 * 818 * otherwise those blocks may not be zeroed after a crash.
821 * Only flush from the on disk size to the smaller of the in memory
822 * file size or the new size as that's the range we really care about
823 * here and prevents waiting for other data not within the range we
824 * care about here.
825 */ 819 */
826 if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) { 820 if (newsize > ip->i_d.di_size &&
821 (oldsize != ip->i_d.di_size || did_zeroing)) {
827 error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, 822 error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
828 ip->i_d.di_size, newsize); 823 ip->i_d.di_size, newsize);
829 if (error) 824 if (error)
830 return error; 825 return error;
831 } 826 }
832 827
833 /* 828 /* Now wait for all direct I/O to complete. */
834 * Wait for all direct I/O to complete.
835 */
836 inode_dio_wait(inode); 829 inode_dio_wait(inode);
837 830
838 /* 831 /*
@@ -979,9 +972,13 @@ xfs_vn_setattr(
979 int error; 972 int error;
980 973
981 if (iattr->ia_valid & ATTR_SIZE) { 974 if (iattr->ia_valid & ATTR_SIZE) {
982 xfs_ilock(ip, XFS_IOLOCK_EXCL); 975 uint iolock = XFS_IOLOCK_EXCL;
983 error = xfs_setattr_size(ip, iattr); 976
984 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 977 xfs_ilock(ip, iolock);
978 error = xfs_break_layouts(dentry->d_inode, &iolock);
979 if (!error)
980 error = xfs_setattr_size(ip, iattr);
981 xfs_iunlock(ip, iolock);
985 } else { 982 } else {
986 error = xfs_setattr_nonsize(ip, iattr, 0); 983 error = xfs_setattr_nonsize(ip, iattr, 0);
987 } 984 }
diff --git a/fs/xfs/xfs_iops.h b/fs/xfs/xfs_iops.h
index 1c34e4335920..ea7a98e9cb70 100644
--- a/fs/xfs/xfs_iops.h
+++ b/fs/xfs/xfs_iops.h
@@ -32,6 +32,7 @@ extern void xfs_setup_inode(struct xfs_inode *);
32 */ 32 */
33#define XFS_ATTR_NOACL 0x01 /* Don't call posix_acl_chmod */ 33#define XFS_ATTR_NOACL 0x01 /* Don't call posix_acl_chmod */
34 34
35extern void xfs_setattr_time(struct xfs_inode *ip, struct iattr *iattr);
35extern int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap, 36extern int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap,
36 int flags); 37 int flags);
37extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap); 38extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap);
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index a5b2ff822653..0d8abd6364d9 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -174,6 +174,17 @@ typedef struct xfs_mount {
174 struct workqueue_struct *m_reclaim_workqueue; 174 struct workqueue_struct *m_reclaim_workqueue;
175 struct workqueue_struct *m_log_workqueue; 175 struct workqueue_struct *m_log_workqueue;
176 struct workqueue_struct *m_eofblocks_workqueue; 176 struct workqueue_struct *m_eofblocks_workqueue;
177
178 /*
179 * Generation of the filesystem layout. This is incremented by each
180 * growfs, and used by the pNFS server to ensure the client updates
181 * its view of the block device once it gets a layout that might
182 * reference the newly added blocks. Does not need to be persistent
183 * as long as we only allow file system size increments, but if we
184 * ever support shrinks it would have to be persisted in addition
185 * to various other kinds of pain inflicted on the pNFS server.
186 */
187 __uint32_t m_generation;
177} xfs_mount_t; 188} xfs_mount_t;
178 189
179/* 190/*
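
The m_generation comment above describes a handshake that spans two hunks of this merge: xfs_growfs_data() (fs/xfs/xfs_fsops.c, earlier) bumps the counter after every grow attempt, and xfs_fs_map_blocks() (fs/xfs/xfs_pnfs.c, below) copies it into each layout handed to a pNFS client. A minimal user-space sketch of that protocol; the names, the fake error plumbing, and the staleness check are illustrative, not kernel API:

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for the mount structure's m_generation counter. */
	static uint32_t fs_generation;

	/* Grow side: increment even on error, as the xfs_growfs_data()
	 * comment explains -- a failure writing secondary superblocks
	 * can still mean the new size is already live. */
	static int grow_fs(int error_from_grow)
	{
		fs_generation++;
		return error_from_grow;
	}

	/* Map side: every layout carries the generation it was built under. */
	static uint32_t map_blocks(void)
	{
		return fs_generation;
	}

	int main(void)
	{
		uint32_t layout_gen = map_blocks();

		grow_fs(0);	/* filesystem grew; old layouts are stale */
		if (layout_gen != fs_generation)
			printf("client must refresh its view of the device\n");
		return 0;
	}
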
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
new file mode 100644
index 000000000000..365dd57ea760
--- /dev/null
+++ b/fs/xfs/xfs_pnfs.c
@@ -0,0 +1,324 @@
1/*
2 * Copyright (c) 2014 Christoph Hellwig.
3 */
4#include "xfs.h"
5#include "xfs_format.h"
6#include "xfs_log_format.h"
7#include "xfs_trans_resv.h"
8#include "xfs_sb.h"
9#include "xfs_mount.h"
10#include "xfs_inode.h"
11#include "xfs_trans.h"
12#include "xfs_log.h"
13#include "xfs_bmap.h"
14#include "xfs_bmap_util.h"
15#include "xfs_error.h"
16#include "xfs_iomap.h"
17#include "xfs_shared.h"
18#include "xfs_bit.h"
19#include "xfs_pnfs.h"
20
21/*
22 * Ensure that we do not have any outstanding pNFS layouts that can be used by
23 * clients to directly read from or write to this inode. This must be called
24 * before every operation that can remove blocks from the extent map.
25 * Additionally we call it during the write operation, where we aren't concerned
26 * about exposing unallocated blocks but just want to provide basic
27 * synchronization between a local writer and pNFS clients. mmap writes would
28 * also benefit from this sort of synchronization, but due to the tricky locking
29 * rules in the page fault path we don't bother.
30 */
31int
32xfs_break_layouts(
33 struct inode *inode,
34 uint *iolock)
35{
36 struct xfs_inode *ip = XFS_I(inode);
37 int error;
38
39 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));
40
41 while ((error = break_layout(inode, false)) == -EWOULDBLOCK) {
42 xfs_iunlock(ip, *iolock);
43 error = break_layout(inode, true);
44 *iolock = XFS_IOLOCK_EXCL;
45 xfs_ilock(ip, *iolock);
46 }
47
48 return error;
49}
50
51/*
52 * Get a unique ID including its location so that the client can identify
53 * the exported device.
54 */
55int
56xfs_fs_get_uuid(
57 struct super_block *sb,
58 u8 *buf,
59 u32 *len,
60 u64 *offset)
61{
62 struct xfs_mount *mp = XFS_M(sb);
63
64 printk_once(KERN_NOTICE
65"XFS (%s): using experimental pNFS feature, use at your own risk!\n",
66 mp->m_fsname);
67
68 if (*len < sizeof(uuid_t))
69 return -EINVAL;
70
71 memcpy(buf, &mp->m_sb.sb_uuid, sizeof(uuid_t));
72 *len = sizeof(uuid_t);
73 *offset = offsetof(struct xfs_dsb, sb_uuid);
74 return 0;
75}
76
77static void
78xfs_bmbt_to_iomap(
79 struct xfs_inode *ip,
80 struct iomap *iomap,
81 struct xfs_bmbt_irec *imap)
82{
83 struct xfs_mount *mp = ip->i_mount;
84
85 if (imap->br_startblock == HOLESTARTBLOCK) {
86 iomap->blkno = IOMAP_NULL_BLOCK;
87 iomap->type = IOMAP_HOLE;
88 } else if (imap->br_startblock == DELAYSTARTBLOCK) {
89 iomap->blkno = IOMAP_NULL_BLOCK;
90 iomap->type = IOMAP_DELALLOC;
91 } else {
92 iomap->blkno =
93 XFS_FSB_TO_DADDR(ip->i_mount, imap->br_startblock);
94 if (imap->br_state == XFS_EXT_UNWRITTEN)
95 iomap->type = IOMAP_UNWRITTEN;
96 else
97 iomap->type = IOMAP_MAPPED;
98 }
99 iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
100 iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
101}
102
103/*
104 * Get a layout for the pNFS client.
105 */
106int
107xfs_fs_map_blocks(
108 struct inode *inode,
109 loff_t offset,
110 u64 length,
111 struct iomap *iomap,
112 bool write,
113 u32 *device_generation)
114{
115 struct xfs_inode *ip = XFS_I(inode);
116 struct xfs_mount *mp = ip->i_mount;
117 struct xfs_bmbt_irec imap;
118 xfs_fileoff_t offset_fsb, end_fsb;
119 loff_t limit;
120 int bmapi_flags = XFS_BMAPI_ENTIRE;
121 int nimaps = 1;
122 uint lock_flags;
123 int error = 0;
124
125 if (XFS_FORCED_SHUTDOWN(mp))
126 return -EIO;
127
128 /*
129 * We can't export inodes residing on the realtime device. The realtime
130 * device doesn't have a UUID to identify it, so the client has no way
131 * to find it.
132 */
133 if (XFS_IS_REALTIME_INODE(ip))
134 return -ENXIO;
135
136 /*
137 * Lock out any other I/O before we flush and invalidate the pagecache,
138 * and then hand out a layout to the remote system. This is very
139 * similar to direct I/O, except that the synchronization is much more
140 * complicated. See the comment near xfs_break_layouts for a detailed
141 * explanation.
142 */
143 xfs_ilock(ip, XFS_IOLOCK_EXCL);
144
145 error = -EINVAL;
146 limit = mp->m_super->s_maxbytes;
147 if (!write)
148 limit = max(limit, round_up(i_size_read(inode),
149 inode->i_sb->s_blocksize));
150 if (offset > limit)
151 goto out_unlock;
152 if (offset > limit - length)
153 length = limit - offset;
154
155 error = filemap_write_and_wait(inode->i_mapping);
156 if (error)
157 goto out_unlock;
158 error = invalidate_inode_pages2(inode->i_mapping);
159 if (WARN_ON_ONCE(error))
160 goto out_unlock;
161
162 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length);
163 offset_fsb = XFS_B_TO_FSBT(mp, offset);
164
165 lock_flags = xfs_ilock_data_map_shared(ip);
166 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
167 &imap, &nimaps, bmapi_flags);
168 xfs_iunlock(ip, lock_flags);
169
170 if (error)
171 goto out_unlock;
172
173 if (write) {
174 enum xfs_prealloc_flags flags = 0;
175
176 ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
177
178 if (!nimaps || imap.br_startblock == HOLESTARTBLOCK) {
179 error = xfs_iomap_write_direct(ip, offset, length,
180 &imap, nimaps);
181 if (error)
182 goto out_unlock;
183
184 /*
185 * Ensure the next transaction is committed
186 * synchronously so that the blocks allocated and
187 * handed out to the client are guaranteed to be
188 * present even after a server crash.
189 */
190 flags |= XFS_PREALLOC_SET | XFS_PREALLOC_SYNC;
191 }
192
193 error = xfs_update_prealloc_flags(ip, flags);
194 if (error)
195 goto out_unlock;
196 }
197 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
198
199 xfs_bmbt_to_iomap(ip, iomap, &imap);
200 *device_generation = mp->m_generation;
201 return error;
202out_unlock:
203 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
204 return error;
205}
206
207/*
208 * Ensure the size update falls into a valid allocated block.
209 */
210static int
211xfs_pnfs_validate_isize(
212 struct xfs_inode *ip,
213 xfs_off_t isize)
214{
215 struct xfs_bmbt_irec imap;
216 int nimaps = 1;
217 int error = 0;
218
219 xfs_ilock(ip, XFS_ILOCK_SHARED);
220 error = xfs_bmapi_read(ip, XFS_B_TO_FSBT(ip->i_mount, isize - 1), 1,
221 &imap, &nimaps, 0);
222 xfs_iunlock(ip, XFS_ILOCK_SHARED);
223 if (error)
224 return error;
225
226 if (imap.br_startblock == HOLESTARTBLOCK ||
227 imap.br_startblock == DELAYSTARTBLOCK ||
228 imap.br_state == XFS_EXT_UNWRITTEN)
229 return -EIO;
230 return 0;
231}
232
233/*
234 * Make sure the blocks described by maps are stable on disk. This includes
235 * converting any unwritten extents, flushing the disk cache and updating the
236 * time stamps.
237 *
238 * Note that we rely on the caller to always send us a timestamp update so that
239 * we always commit a transaction here. If that stops being true we will have
240 * to manually flush the cache here similar to what the fsync code path does
241 * for datasyncs on files that have no dirty metadata.
242 */
243int
244xfs_fs_commit_blocks(
245 struct inode *inode,
246 struct iomap *maps,
247 int nr_maps,
248 struct iattr *iattr)
249{
250 struct xfs_inode *ip = XFS_I(inode);
251 struct xfs_mount *mp = ip->i_mount;
252 struct xfs_trans *tp;
253 bool update_isize = false;
254 int error, i;
255 loff_t size;
256
257 ASSERT(iattr->ia_valid & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME));
258
259 xfs_ilock(ip, XFS_IOLOCK_EXCL);
260
261 size = i_size_read(inode);
262 if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size > size) {
263 update_isize = true;
264 size = iattr->ia_size;
265 }
266
267 for (i = 0; i < nr_maps; i++) {
268 u64 start, length, end;
269
270 start = maps[i].offset;
271 if (start > size)
272 continue;
273
274 end = start + maps[i].length;
275 if (end > size)
276 end = size;
277
278 length = end - start;
279 if (!length)
280 continue;
281
282 /*
283 * Make sure reads through the pagecache see the new data.
284 */
285 error = invalidate_inode_pages2_range(inode->i_mapping,
286 start >> PAGE_CACHE_SHIFT,
287 (end - 1) >> PAGE_CACHE_SHIFT);
288 WARN_ON_ONCE(error);
289
290 error = xfs_iomap_write_unwritten(ip, start, length);
291 if (error)
292 goto out_drop_iolock;
293 }
294
295 if (update_isize) {
296 error = xfs_pnfs_validate_isize(ip, size);
297 if (error)
298 goto out_drop_iolock;
299 }
300
301 tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
302 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
303 if (error) {
304 xfs_trans_cancel(tp, 0);
305 goto out_drop_iolock;
306 }
307
308 xfs_ilock(ip, XFS_ILOCK_EXCL);
309 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
310 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
311
312 xfs_setattr_time(ip, iattr);
313 if (update_isize) {
314 i_size_write(inode, iattr->ia_size);
315 ip->i_d.di_size = iattr->ia_size;
316 }
317
318 xfs_trans_set_sync(tp);
319 error = xfs_trans_commit(tp, 0);
320
321out_drop_iolock:
322 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
323 return error;
324}
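
The retry loop in xfs_break_layouts() above is a lock-cycling idiom: attempt the recall without blocking while holding the i/o lock, and on -EWOULDBLOCK drop the lock, wait for the recall with the blocking variant, then re-take the lock exclusively and try again. A compilable user-space analogue of that shape; fake_break_layout() and the layout counter are stand-ins, not kernel API:

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t iolock = PTHREAD_MUTEX_INITIALIZER;
	static int outstanding_layouts = 2;	/* pretend two clients hold layouts */

	/* Stand-in for break_layout(inode, wait). */
	static int fake_break_layout(int wait)
	{
		if (outstanding_layouts == 0)
			return 0;
		if (!wait)
			return -EWOULDBLOCK;
		outstanding_layouts--;		/* "waited" for one recall */
		return 0;
	}

	/* Same shape as xfs_break_layouts(): caller holds iolock. */
	static int break_layouts_example(void)
	{
		int error;

		while ((error = fake_break_layout(0)) == -EWOULDBLOCK) {
			pthread_mutex_unlock(&iolock);	/* cannot sleep under it */
			error = fake_break_layout(1);	/* blocking wait */
			pthread_mutex_lock(&iolock);	/* re-take exclusively */
		}
		return error;
	}

	int main(void)
	{
		pthread_mutex_lock(&iolock);
		printf("break_layouts -> %d\n", break_layouts_example());
		pthread_mutex_unlock(&iolock);
		return 0;
	}
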
diff --git a/fs/xfs/xfs_pnfs.h b/fs/xfs/xfs_pnfs.h
new file mode 100644
index 000000000000..b7fbfce660f6
--- /dev/null
+++ b/fs/xfs/xfs_pnfs.h
@@ -0,0 +1,18 @@
1#ifndef _XFS_PNFS_H
2#define _XFS_PNFS_H 1
3
4#ifdef CONFIG_NFSD_PNFS
5int xfs_fs_get_uuid(struct super_block *sb, u8 *buf, u32 *len, u64 *offset);
6int xfs_fs_map_blocks(struct inode *inode, loff_t offset, u64 length,
7 struct iomap *iomap, bool write, u32 *device_generation);
8int xfs_fs_commit_blocks(struct inode *inode, struct iomap *maps, int nr_maps,
9 struct iattr *iattr);
10
11int xfs_break_layouts(struct inode *inode, uint *iolock);
12#else
13static inline int xfs_break_layouts(struct inode *inode, uint *iolock)
14{
15 return 0;
16}
17#endif /* CONFIG_NFSD_PNFS */
18#endif /* _XFS_PNFS_H */
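
Every caller converted in this series (xfs_file_fallocate(), xfs_ioc_space(), and xfs_vn_setattr() in the hunks above) follows the same shape: the lock level starts exclusive, xfs_break_layouts() may cycle the lock, and the unlock must use the iolock variable rather than a hard-coded XFS_IOLOCK_EXCL. The common pattern, extracted from those hunks:

	uint	iolock = XFS_IOLOCK_EXCL;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock);
	if (error)
		goto out_unlock;

	/* ... operation that may remove blocks from the extent map ... */

	out_unlock:
		xfs_iunlock(ip, iolock);
		return error;

With CONFIG_NFSD_PNFS disabled, the stub above compiles the call away, so the pattern costs nothing on non-pNFS builds.
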
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 53cc2aaf8d2b..fbbb9e62e274 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -836,6 +836,11 @@ xfs_qm_reset_dqcounts(
836 */ 836 */
837 xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR, 837 xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
838 "xfs_quotacheck"); 838 "xfs_quotacheck");
839 /*
840 * Reset the type in case we are reusing a group quota file for
841 * project quotas, or vice versa.
842 */
843 ddq->d_flags = type;
839 ddq->d_bcount = 0; 844 ddq->d_bcount = 0;
840 ddq->d_icount = 0; 845 ddq->d_icount = 0;
841 ddq->d_rtbcount = 0; 846 ddq->d_rtbcount = 0;
diff --git a/include/acpi/acpi_lpat.h b/include/acpi/acpi_lpat.h
new file mode 100644
index 000000000000..da37e12d23e2
--- /dev/null
+++ b/include/acpi/acpi_lpat.h
@@ -0,0 +1,65 @@
1/*
2 * acpi_lpat.h - LPAT table processing functions
3 *
4 * Copyright (C) 2015 Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef ACPI_LPAT_H
17#define ACPI_LPAT_H
18
19struct acpi_lpat {
20 int temp;
21 int raw;
22};
23
24struct acpi_lpat_conversion_table {
25 struct acpi_lpat *lpat;
26 int lpat_count;
27};
28
29#ifdef CONFIG_ACPI
30
31int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
32 int raw);
33int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table,
34 int temp);
35struct acpi_lpat_conversion_table *acpi_lpat_get_conversion_table(acpi_handle
36 handle);
37void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table
38 *lpat_table);
39
40#else
41static inline int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
42 int raw)
43{
44 return 0;
45}
46
47static inline int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table,
48 int temp)
49{
50 return 0;
51}
52
53static inline struct acpi_lpat_conversion_table *acpi_lpat_get_conversion_table(
54 acpi_handle handle)
55{
56 return NULL;
57}
58
59static inline void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table
60 *lpat_table)
61{
62}
63
64#endif
65#endif
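
The stubs above let CONFIG_ACPI=n code compile unchanged. A hedged sketch of how a driver might consume this API; where the acpi_handle comes from and the error policy are illustrative, only the four functions are from this header:

	/* Fetch the LPAT table once, convert a raw sensor reading to a
	 * temperature, then release the table. */
	static int example_read_temp(acpi_handle handle, int raw_reading)
	{
		struct acpi_lpat_conversion_table *table;
		int temp;

		table = acpi_lpat_get_conversion_table(handle);
		if (!table)
			return -ENODEV;

		temp = acpi_lpat_raw_to_temp(table, raw_reading);
		acpi_lpat_free_conversion_table(table);
		return temp;
	}
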
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
index ce37349860fe..7389c87116a0 100644
--- a/include/asm-generic/pci_iomap.h
+++ b/include/asm-generic/pci_iomap.h
@@ -15,6 +15,9 @@ struct pci_dev;
15#ifdef CONFIG_PCI 15#ifdef CONFIG_PCI
16/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ 16/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
17extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); 17extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
18extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
19 unsigned long offset,
20 unsigned long maxlen);
18/* Create a virtual mapping cookie for a port on a given PCI device. 21/* Create a virtual mapping cookie for a port on a given PCI device.
19 * Do not call this directly, it exists to make it easier for architectures 22 * Do not call this directly, it exists to make it easier for architectures
20 * to override */ 23 * to override */
@@ -30,6 +33,13 @@ static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned lon
30{ 33{
31 return NULL; 34 return NULL;
32} 35}
36
37static inline void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
38 unsigned long offset,
39 unsigned long maxlen)
40{
41 return NULL;
42}
33#endif 43#endif
34 44
35#endif /* __ASM_GENERIC_IO_H */ 45#endif /* __ASM_GENERIC_IO_H */
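
pci_iomap_range() generalizes pci_iomap() by mapping only a window of the BAR instead of the whole thing. A hedged driver-side sketch; the probe function, BAR number, and offsets are illustrative, not taken from any real device:

	static int demo_probe(struct pci_dev *pdev)
	{
		void __iomem *regs;

		/* Map 0x100 bytes starting 0x1000 into BAR 0, e.g. to
		 * leave a neighbouring window of the BAR untouched. */
		regs = pci_iomap_range(pdev, 0, 0x1000, 0x100);
		if (!regs)
			return -ENOMEM;

		/* ... ioread32(regs + reg_offset) ... */

		pci_iounmap(pdev, regs);
		return 0;
	}
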
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 180ad0e6de21..d016dc57f007 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -214,9 +214,9 @@
214 INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info) 214 INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
215 215
216#define _INTEL_BDW_M_IDS(gt, info) \ 216#define _INTEL_BDW_M_IDS(gt, info) \
217 _INTEL_BDW_M(gt, 0x1602, info), /* ULT */ \ 217 _INTEL_BDW_M(gt, 0x1602, info), /* Halo */ \
218 _INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \ 218 _INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \
219 _INTEL_BDW_M(gt, 0x160B, info), /* Iris */ \ 219 _INTEL_BDW_M(gt, 0x160B, info), /* ULT */ \
220 _INTEL_BDW_M(gt, 0x160E, info) /* ULX */ 220 _INTEL_BDW_M(gt, 0x160E, info) /* ULX */
221 221
222#define _INTEL_BDW_D_IDS(gt, info) \ 222#define _INTEL_BDW_D_IDS(gt, info) \
diff --git a/include/dt-bindings/clock/alphascale,asm9260.h b/include/dt-bindings/clock/alphascale,asm9260.h
new file mode 100644
index 000000000000..04e8db27daf0
--- /dev/null
+++ b/include/dt-bindings/clock/alphascale,asm9260.h
@@ -0,0 +1,97 @@
1/*
2 * Copyright 2014 Oleksij Rempel <linux@rempel-privat.de>
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _DT_BINDINGS_CLK_ASM9260_H
15#define _DT_BINDINGS_CLK_ASM9260_H
16
17/* ahb gate */
18#define CLKID_AHB_ROM 0
19#define CLKID_AHB_RAM 1
20#define CLKID_AHB_GPIO 2
21#define CLKID_AHB_MAC 3
22#define CLKID_AHB_EMI 4
23#define CLKID_AHB_USB0 5
24#define CLKID_AHB_USB1 6
25#define CLKID_AHB_DMA0 7
26#define CLKID_AHB_DMA1 8
27#define CLKID_AHB_UART0 9
28#define CLKID_AHB_UART1 10
29#define CLKID_AHB_UART2 11
30#define CLKID_AHB_UART3 12
31#define CLKID_AHB_UART4 13
32#define CLKID_AHB_UART5 14
33#define CLKID_AHB_UART6 15
34#define CLKID_AHB_UART7 16
35#define CLKID_AHB_UART8 17
36#define CLKID_AHB_UART9 18
37#define CLKID_AHB_I2S0 19
38#define CLKID_AHB_I2C0 20
39#define CLKID_AHB_I2C1 21
40#define CLKID_AHB_SSP0 22
41#define CLKID_AHB_IOCONFIG 23
42#define CLKID_AHB_WDT 24
43#define CLKID_AHB_CAN0 25
44#define CLKID_AHB_CAN1 26
45#define CLKID_AHB_MPWM 27
46#define CLKID_AHB_SPI0 28
47#define CLKID_AHB_SPI1 29
48#define CLKID_AHB_QEI 30
49#define CLKID_AHB_QUADSPI0 31
50#define CLKID_AHB_CAMIF 32
51#define CLKID_AHB_LCDIF 33
52#define CLKID_AHB_TIMER0 34
53#define CLKID_AHB_TIMER1 35
54#define CLKID_AHB_TIMER2 36
55#define CLKID_AHB_TIMER3 37
56#define CLKID_AHB_IRQ 38
57#define CLKID_AHB_RTC 39
58#define CLKID_AHB_NAND 40
59#define CLKID_AHB_ADC0 41
60#define CLKID_AHB_LED 42
61#define CLKID_AHB_DAC0 43
62#define CLKID_AHB_LCD 44
63#define CLKID_AHB_I2S1 45
64#define CLKID_AHB_MAC1 46
65
66/* divider */
67#define CLKID_SYS_CPU 47
68#define CLKID_SYS_AHB 48
69#define CLKID_SYS_I2S0M 49
70#define CLKID_SYS_I2S0S 50
71#define CLKID_SYS_I2S1M 51
72#define CLKID_SYS_I2S1S 52
73#define CLKID_SYS_UART0 53
74#define CLKID_SYS_UART1 54
75#define CLKID_SYS_UART2 55
76#define CLKID_SYS_UART3 56
77#define CLKID_SYS_UART4 56
78#define CLKID_SYS_UART5 57
79#define CLKID_SYS_UART6 58
80#define CLKID_SYS_UART7 59
81#define CLKID_SYS_UART8 60
82#define CLKID_SYS_UART9 61
83#define CLKID_SYS_SPI0 62
84#define CLKID_SYS_SPI1 63
85#define CLKID_SYS_QUADSPI 64
86#define CLKID_SYS_SSP0 65
87#define CLKID_SYS_NAND 66
88#define CLKID_SYS_TRACE 67
89#define CLKID_SYS_CAMM 68
90#define CLKID_SYS_WDT 69
91#define CLKID_SYS_CLKOUT 70
92#define CLKID_SYS_MAC 71
93#define CLKID_SYS_LCD 72
94#define CLKID_SYS_ADCANA 73
95
96#define MAX_CLKS 74
97#endif
diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h
index 34fe28c622d0..c4b1676ea674 100644
--- a/include/dt-bindings/clock/exynos4.h
+++ b/include/dt-bindings/clock/exynos4.h
@@ -262,8 +262,13 @@
262#define CLK_DIV_MCUISP1 453 /* Exynos4x12 only */ 262#define CLK_DIV_MCUISP1 453 /* Exynos4x12 only */
263#define CLK_DIV_ACLK200 454 /* Exynos4x12 only */ 263#define CLK_DIV_ACLK200 454 /* Exynos4x12 only */
264#define CLK_DIV_ACLK400_MCUISP 455 /* Exynos4x12 only */ 264#define CLK_DIV_ACLK400_MCUISP 455 /* Exynos4x12 only */
265#define CLK_DIV_ACP 456
266#define CLK_DIV_DMC 457
267#define CLK_DIV_C2C 458 /* Exynos4x12 only */
268#define CLK_DIV_GDL 459
269#define CLK_DIV_GDR 460
265 270
266/* must be greater than maximal clock id */ 271/* must be greater than maximal clock id */
267#define CLK_NR_CLKS 456 272#define CLK_NR_CLKS 461
268 273
269#endif /* _DT_BINDINGS_CLOCK_EXYNOS_4_H */ 274#endif /* _DT_BINDINGS_CLOCK_EXYNOS_4_H */
diff --git a/include/dt-bindings/clock/exynos7-clk.h b/include/dt-bindings/clock/exynos7-clk.h
index 8e4681b07ae7..e33c75a3c09d 100644
--- a/include/dt-bindings/clock/exynos7-clk.h
+++ b/include/dt-bindings/clock/exynos7-clk.h
@@ -17,7 +17,11 @@
17#define DOUT_SCLK_CC_PLL 4 17#define DOUT_SCLK_CC_PLL 4
18#define DOUT_SCLK_MFC_PLL 5 18#define DOUT_SCLK_MFC_PLL 5
19#define DOUT_ACLK_CCORE_133 6 19#define DOUT_ACLK_CCORE_133 6
20#define TOPC_NR_CLK 7 20#define DOUT_ACLK_MSCL_532 7
21#define ACLK_MSCL_532 8
22#define DOUT_SCLK_AUD_PLL 9
23#define FOUT_AUD_PLL 10
24#define TOPC_NR_CLK 11
21 25
22/* TOP0 */ 26/* TOP0 */
23#define DOUT_ACLK_PERIC1 1 27#define DOUT_ACLK_PERIC1 1
@@ -26,7 +30,15 @@
26#define CLK_SCLK_UART1 4 30#define CLK_SCLK_UART1 4
27#define CLK_SCLK_UART2 5 31#define CLK_SCLK_UART2 5
28#define CLK_SCLK_UART3 6 32#define CLK_SCLK_UART3 6
29#define TOP0_NR_CLK 7 33#define CLK_SCLK_SPI0 7
34#define CLK_SCLK_SPI1 8
35#define CLK_SCLK_SPI2 9
36#define CLK_SCLK_SPI3 10
37#define CLK_SCLK_SPI4 11
38#define CLK_SCLK_SPDIF 12
39#define CLK_SCLK_PCM1 13
40#define CLK_SCLK_I2S1 14
41#define TOP0_NR_CLK 15
30 42
31/* TOP1 */ 43/* TOP1 */
32#define DOUT_ACLK_FSYS1_200 1 44#define DOUT_ACLK_FSYS1_200 1
@@ -70,7 +82,23 @@
70#define PCLK_HSI2C6 9 82#define PCLK_HSI2C6 9
71#define PCLK_HSI2C7 10 83#define PCLK_HSI2C7 10
72#define PCLK_HSI2C8 11 84#define PCLK_HSI2C8 11
73#define PERIC1_NR_CLK 12 85#define PCLK_SPI0 12
86#define PCLK_SPI1 13
87#define PCLK_SPI2 14
88#define PCLK_SPI3 15
89#define PCLK_SPI4 16
90#define SCLK_SPI0 17
91#define SCLK_SPI1 18
92#define SCLK_SPI2 19
93#define SCLK_SPI3 20
94#define SCLK_SPI4 21
95#define PCLK_I2S1 22
96#define PCLK_PCM1 23
97#define PCLK_SPDIF 24
98#define SCLK_I2S1 25
99#define SCLK_PCM1 26
100#define SCLK_SPDIF 27
101#define PERIC1_NR_CLK 28
74 102
75/* PERIS */ 103/* PERIS */
76#define PCLK_CHIPID 1 104#define PCLK_CHIPID 1
@@ -82,11 +110,63 @@
82 110
83/* FSYS0 */ 111/* FSYS0 */
84#define ACLK_MMC2 1 112#define ACLK_MMC2 1
85#define FSYS0_NR_CLK 2 113#define ACLK_AXIUS_USBDRD30X_FSYS0X 2
114#define ACLK_USBDRD300 3
115#define SCLK_USBDRD300_SUSPENDCLK 4
116#define SCLK_USBDRD300_REFCLK 5
117#define PHYCLK_USBDRD300_UDRD30_PIPE_PCLK_USER 6
118#define PHYCLK_USBDRD300_UDRD30_PHYCLK_USER 7
119#define OSCCLK_PHY_CLKOUT_USB30_PHY 8
120#define ACLK_PDMA0 9
121#define ACLK_PDMA1 10
122#define FSYS0_NR_CLK 11
86 123
87/* FSYS1 */ 124/* FSYS1 */
88#define ACLK_MMC1 1 125#define ACLK_MMC1 1
89#define ACLK_MMC0 2 126#define ACLK_MMC0 2
90#define FSYS1_NR_CLK 3 127#define FSYS1_NR_CLK 3
91 128
129/* MSCL */
130#define USERMUX_ACLK_MSCL_532 1
131#define DOUT_PCLK_MSCL 2
132#define ACLK_MSCL_0 3
133#define ACLK_MSCL_1 4
134#define ACLK_JPEG 5
135#define ACLK_G2D 6
136#define ACLK_LH_ASYNC_SI_MSCL_0 7
137#define ACLK_LH_ASYNC_SI_MSCL_1 8
138#define ACLK_AXI2ACEL_BRIDGE 9
139#define ACLK_XIU_MSCLX_0 10
140#define ACLK_XIU_MSCLX_1 11
141#define ACLK_QE_MSCL_0 12
142#define ACLK_QE_MSCL_1 13
143#define ACLK_QE_JPEG 14
144#define ACLK_QE_G2D 15
145#define ACLK_PPMU_MSCL_0 16
146#define ACLK_PPMU_MSCL_1 17
147#define ACLK_MSCLNP_133 18
148#define ACLK_AHB2APB_MSCL0P 19
149#define ACLK_AHB2APB_MSCL1P 20
150
151#define PCLK_MSCL_0 21
152#define PCLK_MSCL_1 22
153#define PCLK_JPEG 23
154#define PCLK_G2D 24
155#define PCLK_QE_MSCL_0 25
156#define PCLK_QE_MSCL_1 26
157#define PCLK_QE_JPEG 27
158#define PCLK_QE_G2D 28
159#define PCLK_PPMU_MSCL_0 29
160#define PCLK_PPMU_MSCL_1 30
161#define PCLK_AXI2ACEL_BRIDGE 31
162#define PCLK_PMU_MSCL 32
163#define MSCL_NR_CLK 33
164
165/* AUD */
166#define SCLK_I2S 1
167#define SCLK_PCM 2
168#define PCLK_I2S 3
169#define PCLK_PCM 4
170#define ACLK_ADMA 5
171#define AUD_NR_CLK 6
92#endif /* _DT_BINDINGS_CLOCK_EXYNOS7_H */ 172#endif /* _DT_BINDINGS_CLOCK_EXYNOS7_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq806x.h b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
index b857cadb0bd4..04fb29ae30e6 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
@@ -238,7 +238,6 @@
238#define PLL0_VOTE 221 238#define PLL0_VOTE 221
239#define PLL3 222 239#define PLL3 222
240#define PLL3_VOTE 223 240#define PLL3_VOTE 223
241#define PLL4 224
242#define PLL4_VOTE 225 241#define PLL4_VOTE 225
243#define PLL8 226 242#define PLL8 226
244#define PLL8_VOTE 227 243#define PLL8_VOTE 227
diff --git a/include/dt-bindings/clock/qcom,lcc-ipq806x.h b/include/dt-bindings/clock/qcom,lcc-ipq806x.h
new file mode 100644
index 000000000000..4e944b85c56d
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lcc-ipq806x.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _DT_BINDINGS_CLK_LCC_IPQ806X_H
15#define _DT_BINDINGS_CLK_LCC_IPQ806X_H
16
17#define PLL4 0
18#define MI2S_OSR_SRC 1
19#define MI2S_OSR_CLK 2
20#define MI2S_DIV_CLK 3
21#define MI2S_BIT_DIV_CLK 4
22#define MI2S_BIT_CLK 5
23#define PCM_SRC 6
24#define PCM_CLK_OUT 7
25#define PCM_CLK 8
26#define SPDIF_SRC 9
27#define SPDIF_CLK 10
28#define AHBIX_CLK 11
29
30#endif
diff --git a/include/dt-bindings/clock/qcom,lcc-msm8960.h b/include/dt-bindings/clock/qcom,lcc-msm8960.h
new file mode 100644
index 000000000000..4fb2aa64d9fe
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lcc-msm8960.h
@@ -0,0 +1,50 @@
1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _DT_BINDINGS_CLK_LCC_MSM8960_H
15#define _DT_BINDINGS_CLK_LCC_MSM8960_H
16
17#define PLL4 0
18#define MI2S_OSR_SRC 1
19#define MI2S_OSR_CLK 2
20#define MI2S_DIV_CLK 3
21#define MI2S_BIT_DIV_CLK 4
22#define MI2S_BIT_CLK 5
23#define PCM_SRC 6
24#define PCM_CLK_OUT 7
25#define PCM_CLK 8
26#define SLIMBUS_SRC 9
27#define AUDIO_SLIMBUS_CLK 10
28#define SPS_SLIMBUS_CLK 11
29#define CODEC_I2S_MIC_OSR_SRC 12
30#define CODEC_I2S_MIC_OSR_CLK 13
31#define CODEC_I2S_MIC_DIV_CLK 14
32#define CODEC_I2S_MIC_BIT_DIV_CLK 15
33#define CODEC_I2S_MIC_BIT_CLK 16
34#define SPARE_I2S_MIC_OSR_SRC 17
35#define SPARE_I2S_MIC_OSR_CLK 18
36#define SPARE_I2S_MIC_DIV_CLK 19
37#define SPARE_I2S_MIC_BIT_DIV_CLK 20
38#define SPARE_I2S_MIC_BIT_CLK 21
39#define CODEC_I2S_SPKR_OSR_SRC 22
40#define CODEC_I2S_SPKR_OSR_CLK 23
41#define CODEC_I2S_SPKR_DIV_CLK 24
42#define CODEC_I2S_SPKR_BIT_DIV_CLK 25
43#define CODEC_I2S_SPKR_BIT_CLK 26
44#define SPARE_I2S_SPKR_OSR_SRC 27
45#define SPARE_I2S_SPKR_OSR_CLK 28
46#define SPARE_I2S_SPKR_DIV_CLK 29
47#define SPARE_I2S_SPKR_BIT_DIV_CLK 30
48#define SPARE_I2S_SPKR_BIT_CLK 31
49
50#endif
diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h
new file mode 100644
index 000000000000..ae2eb17a1658
--- /dev/null
+++ b/include/dt-bindings/clock/tegra124-car-common.h
@@ -0,0 +1,345 @@
1/*
2 * This header provides constants for binding nvidia,tegra124-car or
3 * nvidia,tegra132-car.
4 *
5 * The first 192 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB
6 * registers. These IDs often match those in the CAR's RST_DEVICES registers,
7 * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In
8 * this case, those clocks are assigned IDs above 185 in order to highlight
9 * this issue. Implementations that interpret these clock IDs as bit values
10 * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to
11 * explicitly handle these special cases.
12 *
13 * The balance of the clocks controlled by the CAR is assigned IDs of 185 and
14 * above.
15 */
16
17#ifndef _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H
18#define _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H
19
20/* 0 */
21/* 1 */
22/* 2 */
23#define TEGRA124_CLK_ISPB 3
24#define TEGRA124_CLK_RTC 4
25#define TEGRA124_CLK_TIMER 5
26#define TEGRA124_CLK_UARTA 6
27/* 7 (register bit affects uartb and vfir) */
28/* 8 */
29#define TEGRA124_CLK_SDMMC2 9
30/* 10 (register bit affects spdif_in and spdif_out) */
31#define TEGRA124_CLK_I2S1 11
32#define TEGRA124_CLK_I2C1 12
33/* 13 */
34#define TEGRA124_CLK_SDMMC1 14
35#define TEGRA124_CLK_SDMMC4 15
36/* 16 */
37#define TEGRA124_CLK_PWM 17
38#define TEGRA124_CLK_I2S2 18
39/* 20 (register bit affects vi and vi_sensor) */
40/* 21 */
41#define TEGRA124_CLK_USBD 22
42#define TEGRA124_CLK_ISP 23
43/* 24 */
44/* 25 */
45#define TEGRA124_CLK_DISP2 26
46#define TEGRA124_CLK_DISP1 27
47#define TEGRA124_CLK_HOST1X 28
48#define TEGRA124_CLK_VCP 29
49#define TEGRA124_CLK_I2S0 30
50/* 31 */
51
52#define TEGRA124_CLK_MC 32
53/* 33 */
54#define TEGRA124_CLK_APBDMA 34
55/* 35 */
56#define TEGRA124_CLK_KBC 36
57/* 37 */
58/* 38 */
59/* 39 (register bit affects fuse and fuse_burn) */
60#define TEGRA124_CLK_KFUSE 40
61#define TEGRA124_CLK_SBC1 41
62#define TEGRA124_CLK_NOR 42
63/* 43 */
64#define TEGRA124_CLK_SBC2 44
65/* 45 */
66#define TEGRA124_CLK_SBC3 46
67#define TEGRA124_CLK_I2C5 47
68#define TEGRA124_CLK_DSIA 48
69/* 49 */
70#define TEGRA124_CLK_MIPI 50
71#define TEGRA124_CLK_HDMI 51
72#define TEGRA124_CLK_CSI 52
73/* 53 */
74#define TEGRA124_CLK_I2C2 54
75#define TEGRA124_CLK_UARTC 55
76#define TEGRA124_CLK_MIPI_CAL 56
77#define TEGRA124_CLK_EMC 57
78#define TEGRA124_CLK_USB2 58
79#define TEGRA124_CLK_USB3 59
80/* 60 */
81#define TEGRA124_CLK_VDE 61
82#define TEGRA124_CLK_BSEA 62
83#define TEGRA124_CLK_BSEV 63
84
85/* 64 */
86#define TEGRA124_CLK_UARTD 65
87/* 66 */
88#define TEGRA124_CLK_I2C3 67
89#define TEGRA124_CLK_SBC4 68
90#define TEGRA124_CLK_SDMMC3 69
91#define TEGRA124_CLK_PCIE 70
92#define TEGRA124_CLK_OWR 71
93#define TEGRA124_CLK_AFI 72
94#define TEGRA124_CLK_CSITE 73
95/* 74 */
96/* 75 */
97#define TEGRA124_CLK_LA 76
98#define TEGRA124_CLK_TRACE 77
99#define TEGRA124_CLK_SOC_THERM 78
100#define TEGRA124_CLK_DTV 79
101/* 80 */
102#define TEGRA124_CLK_I2CSLOW 81
103#define TEGRA124_CLK_DSIB 82
104#define TEGRA124_CLK_TSEC 83
105/* 84 */
106/* 85 */
107/* 86 */
108/* 87 */
109/* 88 */
110#define TEGRA124_CLK_XUSB_HOST 89
111/* 90 */
112#define TEGRA124_CLK_MSENC 91
113#define TEGRA124_CLK_CSUS 92
114/* 93 */
115/* 94 */
116/* 95 (bit affects xusb_dev and xusb_dev_src) */
117
118/* 96 */
119/* 97 */
120/* 98 */
121#define TEGRA124_CLK_MSELECT 99
122#define TEGRA124_CLK_TSENSOR 100
123#define TEGRA124_CLK_I2S3 101
124#define TEGRA124_CLK_I2S4 102
125#define TEGRA124_CLK_I2C4 103
126#define TEGRA124_CLK_SBC5 104
127#define TEGRA124_CLK_SBC6 105
128#define TEGRA124_CLK_D_AUDIO 106
129#define TEGRA124_CLK_APBIF 107
130#define TEGRA124_CLK_DAM0 108
131#define TEGRA124_CLK_DAM1 109
132#define TEGRA124_CLK_DAM2 110
133#define TEGRA124_CLK_HDA2CODEC_2X 111
134/* 112 */
135#define TEGRA124_CLK_AUDIO0_2X 113
136#define TEGRA124_CLK_AUDIO1_2X 114
137#define TEGRA124_CLK_AUDIO2_2X 115
138#define TEGRA124_CLK_AUDIO3_2X 116
139#define TEGRA124_CLK_AUDIO4_2X 117
140#define TEGRA124_CLK_SPDIF_2X 118
141#define TEGRA124_CLK_ACTMON 119
142#define TEGRA124_CLK_EXTERN1 120
143#define TEGRA124_CLK_EXTERN2 121
144#define TEGRA124_CLK_EXTERN3 122
145#define TEGRA124_CLK_SATA_OOB 123
146#define TEGRA124_CLK_SATA 124
147#define TEGRA124_CLK_HDA 125
148/* 126 */
149#define TEGRA124_CLK_SE 127
150
151#define TEGRA124_CLK_HDA2HDMI 128
152#define TEGRA124_CLK_SATA_COLD 129
153/* 130 */
154/* 131 */
155/* 132 */
156/* 133 */
157/* 134 */
158/* 135 */
159/* 136 */
160/* 137 */
161/* 138 */
162/* 139 */
163/* 140 */
164/* 141 */
165/* 142 */
166/* 143 (bit affects xusb_falcon_src, xusb_fs_src, */
167/* xusb_host_src and xusb_ss_src) */
168#define TEGRA124_CLK_CILAB 144
169#define TEGRA124_CLK_CILCD 145
170#define TEGRA124_CLK_CILE 146
171#define TEGRA124_CLK_DSIALP 147
172#define TEGRA124_CLK_DSIBLP 148
173#define TEGRA124_CLK_ENTROPY 149
174#define TEGRA124_CLK_DDS 150
175/* 151 */
176#define TEGRA124_CLK_DP2 152
177#define TEGRA124_CLK_AMX 153
178#define TEGRA124_CLK_ADX 154
179/* 155 (bit affects dfll_ref and dfll_soc) */
180#define TEGRA124_CLK_XUSB_SS 156
181/* 157 */
182/* 158 */
183/* 159 */
184
185/* 160 */
186/* 161 */
187/* 162 */
188/* 163 */
189/* 164 */
190/* 165 */
191#define TEGRA124_CLK_I2C6 166
192/* 167 */
193/* 168 */
194/* 169 */
195/* 170 */
196#define TEGRA124_CLK_VIM2_CLK 171
197/* 172 */
198/* 173 */
199/* 174 */
200/* 175 */
201#define TEGRA124_CLK_HDMI_AUDIO 176
202#define TEGRA124_CLK_CLK72MHZ 177
203#define TEGRA124_CLK_VIC03 178
204/* 179 */
205#define TEGRA124_CLK_ADX1 180
206#define TEGRA124_CLK_DPAUX 181
207#define TEGRA124_CLK_SOR0 182
208/* 183 */
209#define TEGRA124_CLK_GPU 184
210#define TEGRA124_CLK_AMX1 185
211/* 186 */
212/* 187 */
213/* 188 */
214/* 189 */
215/* 190 */
216/* 191 */
217#define TEGRA124_CLK_UARTB 192
218#define TEGRA124_CLK_VFIR 193
219#define TEGRA124_CLK_SPDIF_IN 194
220#define TEGRA124_CLK_SPDIF_OUT 195
221#define TEGRA124_CLK_VI 196
222#define TEGRA124_CLK_VI_SENSOR 197
223#define TEGRA124_CLK_FUSE 198
224#define TEGRA124_CLK_FUSE_BURN 199
225#define TEGRA124_CLK_CLK_32K 200
226#define TEGRA124_CLK_CLK_M 201
227#define TEGRA124_CLK_CLK_M_DIV2 202
228#define TEGRA124_CLK_CLK_M_DIV4 203
229#define TEGRA124_CLK_PLL_REF 204
230#define TEGRA124_CLK_PLL_C 205
231#define TEGRA124_CLK_PLL_C_OUT1 206
232#define TEGRA124_CLK_PLL_C2 207
233#define TEGRA124_CLK_PLL_C3 208
234#define TEGRA124_CLK_PLL_M 209
235#define TEGRA124_CLK_PLL_M_OUT1 210
236#define TEGRA124_CLK_PLL_P 211
237#define TEGRA124_CLK_PLL_P_OUT1 212
238#define TEGRA124_CLK_PLL_P_OUT2 213
239#define TEGRA124_CLK_PLL_P_OUT3 214
240#define TEGRA124_CLK_PLL_P_OUT4 215
241#define TEGRA124_CLK_PLL_A 216
242#define TEGRA124_CLK_PLL_A_OUT0 217
243#define TEGRA124_CLK_PLL_D 218
244#define TEGRA124_CLK_PLL_D_OUT0 219
245#define TEGRA124_CLK_PLL_D2 220
246#define TEGRA124_CLK_PLL_D2_OUT0 221
247#define TEGRA124_CLK_PLL_U 222
248#define TEGRA124_CLK_PLL_U_480M 223
249
250#define TEGRA124_CLK_PLL_U_60M 224
251#define TEGRA124_CLK_PLL_U_48M 225
252#define TEGRA124_CLK_PLL_U_12M 226
253/* 227 */
254/* 228 */
255#define TEGRA124_CLK_PLL_RE_VCO 229
256#define TEGRA124_CLK_PLL_RE_OUT 230
257#define TEGRA124_CLK_PLL_E 231
258#define TEGRA124_CLK_SPDIF_IN_SYNC 232
259#define TEGRA124_CLK_I2S0_SYNC 233
260#define TEGRA124_CLK_I2S1_SYNC 234
261#define TEGRA124_CLK_I2S2_SYNC 235
262#define TEGRA124_CLK_I2S3_SYNC 236
263#define TEGRA124_CLK_I2S4_SYNC 237
264#define TEGRA124_CLK_VIMCLK_SYNC 238
265#define TEGRA124_CLK_AUDIO0 239
266#define TEGRA124_CLK_AUDIO1 240
267#define TEGRA124_CLK_AUDIO2 241
268#define TEGRA124_CLK_AUDIO3 242
269#define TEGRA124_CLK_AUDIO4 243
270#define TEGRA124_CLK_SPDIF 244
271#define TEGRA124_CLK_CLK_OUT_1 245
272#define TEGRA124_CLK_CLK_OUT_2 246
273#define TEGRA124_CLK_CLK_OUT_3 247
274#define TEGRA124_CLK_BLINK 248
275/* 249 */
276/* 250 */
277/* 251 */
278#define TEGRA124_CLK_XUSB_HOST_SRC 252
279#define TEGRA124_CLK_XUSB_FALCON_SRC 253
280#define TEGRA124_CLK_XUSB_FS_SRC 254
281#define TEGRA124_CLK_XUSB_SS_SRC 255
282
283#define TEGRA124_CLK_XUSB_DEV_SRC 256
284#define TEGRA124_CLK_XUSB_DEV 257
285#define TEGRA124_CLK_XUSB_HS_SRC 258
286#define TEGRA124_CLK_SCLK 259
287#define TEGRA124_CLK_HCLK 260
288#define TEGRA124_CLK_PCLK 261
289/* 262 */
290/* 263 */
291#define TEGRA124_CLK_DFLL_REF 264
292#define TEGRA124_CLK_DFLL_SOC 265
293#define TEGRA124_CLK_VI_SENSOR2 266
294#define TEGRA124_CLK_PLL_P_OUT5 267
295#define TEGRA124_CLK_CML0 268
296#define TEGRA124_CLK_CML1 269
297#define TEGRA124_CLK_PLL_C4 270
298#define TEGRA124_CLK_PLL_DP 271
299#define TEGRA124_CLK_PLL_E_MUX 272
300#define TEGRA124_CLK_PLLD_DSI 273
301/* 274 */
302/* 275 */
303/* 276 */
304/* 277 */
305/* 278 */
306/* 279 */
307/* 280 */
308/* 281 */
309/* 282 */
310/* 283 */
311/* 284 */
312/* 285 */
313/* 286 */
314/* 287 */
315
316/* 288 */
317/* 289 */
318/* 290 */
319/* 291 */
320/* 292 */
321/* 293 */
322/* 294 */
323/* 295 */
324/* 296 */
325/* 297 */
326/* 298 */
327/* 299 */
328#define TEGRA124_CLK_AUDIO0_MUX 300
329#define TEGRA124_CLK_AUDIO1_MUX 301
330#define TEGRA124_CLK_AUDIO2_MUX 302
331#define TEGRA124_CLK_AUDIO3_MUX 303
332#define TEGRA124_CLK_AUDIO4_MUX 304
333#define TEGRA124_CLK_SPDIF_MUX 305
334#define TEGRA124_CLK_CLK_OUT_1_MUX 306
335#define TEGRA124_CLK_CLK_OUT_2_MUX 307
336#define TEGRA124_CLK_CLK_OUT_3_MUX 308
337/* 309 */
338/* 310 */
339#define TEGRA124_CLK_SOR0_LVDS 311
340#define TEGRA124_CLK_XUSB_SS_DIV2 312
341
342#define TEGRA124_CLK_PLL_M_UD 313
343#define TEGRA124_CLK_PLL_C_UD 314
344
345#endif /* _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H */
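
The header comment above warns that clock IDs below 192 usually equal their CLK_OUT_ENB bit, while clocks sharing one enable bit were renumbered to 192 and up. A hedged sketch of the special-casing that warning asks for; the mapping cases come from the comments in this listing, and the function itself is illustrative, not kernel code:

	/* Translate a Tegra124 clock ID to its CLK_OUT_ENB bit, or -1 if
	 * the ID is not a direct enable bit. */
	int tegra124_clk_id_to_enb_bit(int id)
	{
		switch (id) {
		case 192:		/* TEGRA124_CLK_UARTB */
		case 193:		/* TEGRA124_CLK_VFIR */
			return 7;	/* one register bit gates both */
		case 194:		/* TEGRA124_CLK_SPDIF_IN */
		case 195:		/* TEGRA124_CLK_SPDIF_OUT */
			return 10;
		case 196:		/* TEGRA124_CLK_VI */
		case 197:		/* TEGRA124_CLK_VI_SENSOR */
			return 20;
		case 198:		/* TEGRA124_CLK_FUSE */
		case 199:		/* TEGRA124_CLK_FUSE_BURN */
			return 39;
		default:
			return id < 192 ? id : -1;
		}
	}
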
diff --git a/include/dt-bindings/clock/tegra124-car.h b/include/dt-bindings/clock/tegra124-car.h
index af9bc9a3ddbc..2860737f0443 100644
--- a/include/dt-bindings/clock/tegra124-car.h
+++ b/include/dt-bindings/clock/tegra124-car.h
@@ -1,346 +1,19 @@
1/* 1/*
2 * This header provides constants for binding nvidia,tegra124-car. 2 * This header provides Tegra124-specific constants for binding
3 * 3 * nvidia,tegra124-car.
4 * The first 192 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB
5 * registers. These IDs often match those in the CAR's RST_DEVICES registers,
6 * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In
7 * this case, those clocks are assigned IDs above 185 in order to highlight
8 * this issue. Implementations that interpret these clock IDs as bit values
9 * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to
10 * explicitly handle these special cases.
11 *
12 * The balance of the clocks controlled by the CAR are assigned IDs of 185 and
13 * above.
14 */ 4 */
15 5
6#include <dt-bindings/clock/tegra124-car-common.h>
7
16#ifndef _DT_BINDINGS_CLOCK_TEGRA124_CAR_H 8#ifndef _DT_BINDINGS_CLOCK_TEGRA124_CAR_H
17#define _DT_BINDINGS_CLOCK_TEGRA124_CAR_H 9#define _DT_BINDINGS_CLOCK_TEGRA124_CAR_H
18 10
19/* 0 */ 11#define TEGRA124_CLK_PLL_X 227
20/* 1 */ 12#define TEGRA124_CLK_PLL_X_OUT0 228
21/* 2 */
22#define TEGRA124_CLK_ISPB 3
23#define TEGRA124_CLK_RTC 4
24#define TEGRA124_CLK_TIMER 5
25#define TEGRA124_CLK_UARTA 6
26/* 7 (register bit affects uartb and vfir) */
27/* 8 */
28#define TEGRA124_CLK_SDMMC2 9
29/* 10 (register bit affects spdif_in and spdif_out) */
30#define TEGRA124_CLK_I2S1 11
31#define TEGRA124_CLK_I2C1 12
32/* 13 */
33#define TEGRA124_CLK_SDMMC1 14
34#define TEGRA124_CLK_SDMMC4 15
35/* 16 */
36#define TEGRA124_CLK_PWM 17
37#define TEGRA124_CLK_I2S2 18
38/* 20 (register bit affects vi and vi_sensor) */
39/* 21 */
40#define TEGRA124_CLK_USBD 22
41#define TEGRA124_CLK_ISP 23
42/* 26 */
43/* 25 */
44#define TEGRA124_CLK_DISP2 26
45#define TEGRA124_CLK_DISP1 27
46#define TEGRA124_CLK_HOST1X 28
47#define TEGRA124_CLK_VCP 29
48#define TEGRA124_CLK_I2S0 30
49/* 31 */
50
51#define TEGRA124_CLK_MC 32
52/* 33 */
53#define TEGRA124_CLK_APBDMA 34
54/* 35 */
55#define TEGRA124_CLK_KBC 36
56/* 37 */
57/* 38 */
58/* 39 (register bit affects fuse and fuse_burn) */
59#define TEGRA124_CLK_KFUSE 40
60#define TEGRA124_CLK_SBC1 41
61#define TEGRA124_CLK_NOR 42
62/* 43 */
63#define TEGRA124_CLK_SBC2 44
64/* 45 */
65#define TEGRA124_CLK_SBC3 46
66#define TEGRA124_CLK_I2C5 47
67#define TEGRA124_CLK_DSIA 48
68/* 49 */
69#define TEGRA124_CLK_MIPI 50
70#define TEGRA124_CLK_HDMI 51
71#define TEGRA124_CLK_CSI 52
72/* 53 */
73#define TEGRA124_CLK_I2C2 54
74#define TEGRA124_CLK_UARTC 55
75#define TEGRA124_CLK_MIPI_CAL 56
76#define TEGRA124_CLK_EMC 57
77#define TEGRA124_CLK_USB2 58
78#define TEGRA124_CLK_USB3 59
79/* 60 */
80#define TEGRA124_CLK_VDE 61
81#define TEGRA124_CLK_BSEA 62
82#define TEGRA124_CLK_BSEV 63
83
84/* 64 */
85#define TEGRA124_CLK_UARTD 65
86/* 66 */
87#define TEGRA124_CLK_I2C3 67
88#define TEGRA124_CLK_SBC4 68
89#define TEGRA124_CLK_SDMMC3 69
90#define TEGRA124_CLK_PCIE 70
91#define TEGRA124_CLK_OWR 71
92#define TEGRA124_CLK_AFI 72
93#define TEGRA124_CLK_CSITE 73
94/* 74 */
95/* 75 */
96#define TEGRA124_CLK_LA 76
97#define TEGRA124_CLK_TRACE 77
98#define TEGRA124_CLK_SOC_THERM 78
99#define TEGRA124_CLK_DTV 79
100/* 80 */
101#define TEGRA124_CLK_I2CSLOW 81
102#define TEGRA124_CLK_DSIB 82
103#define TEGRA124_CLK_TSEC 83
104/* 84 */
105/* 85 */
106/* 86 */
107/* 87 */
108/* 88 */
109#define TEGRA124_CLK_XUSB_HOST 89
110/* 90 */
111#define TEGRA124_CLK_MSENC 91
112#define TEGRA124_CLK_CSUS 92
113/* 93 */
114/* 94 */
115/* 95 (bit affects xusb_dev and xusb_dev_src) */
116
117/* 96 */
118/* 97 */
119/* 98 */
120#define TEGRA124_CLK_MSELECT 99
121#define TEGRA124_CLK_TSENSOR 100
122#define TEGRA124_CLK_I2S3 101
123#define TEGRA124_CLK_I2S4 102
124#define TEGRA124_CLK_I2C4 103
125#define TEGRA124_CLK_SBC5 104
126#define TEGRA124_CLK_SBC6 105
127#define TEGRA124_CLK_D_AUDIO 106
128#define TEGRA124_CLK_APBIF 107
129#define TEGRA124_CLK_DAM0 108
130#define TEGRA124_CLK_DAM1 109
131#define TEGRA124_CLK_DAM2 110
132#define TEGRA124_CLK_HDA2CODEC_2X 111
133/* 112 */
134#define TEGRA124_CLK_AUDIO0_2X 113
135#define TEGRA124_CLK_AUDIO1_2X 114
136#define TEGRA124_CLK_AUDIO2_2X 115
137#define TEGRA124_CLK_AUDIO3_2X 116
138#define TEGRA124_CLK_AUDIO4_2X 117
139#define TEGRA124_CLK_SPDIF_2X 118
140#define TEGRA124_CLK_ACTMON 119
141#define TEGRA124_CLK_EXTERN1 120
142#define TEGRA124_CLK_EXTERN2 121
143#define TEGRA124_CLK_EXTERN3 122
144#define TEGRA124_CLK_SATA_OOB 123
145#define TEGRA124_CLK_SATA 124
146#define TEGRA124_CLK_HDA 125
147/* 126 */
148#define TEGRA124_CLK_SE 127
149
150#define TEGRA124_CLK_HDA2HDMI 128
151#define TEGRA124_CLK_SATA_COLD 129
152/* 130 */
153/* 131 */
154/* 132 */
155/* 133 */
156/* 134 */
157/* 135 */
158/* 136 */
159/* 137 */
160/* 138 */
161/* 139 */
162/* 140 */
163/* 141 */
164/* 142 */
165/* 143 (bit affects xusb_falcon_src, xusb_fs_src, */
166/* xusb_host_src and xusb_ss_src) */
167#define TEGRA124_CLK_CILAB 144
168#define TEGRA124_CLK_CILCD 145
169#define TEGRA124_CLK_CILE 146
170#define TEGRA124_CLK_DSIALP 147
171#define TEGRA124_CLK_DSIBLP 148
172#define TEGRA124_CLK_ENTROPY 149
173#define TEGRA124_CLK_DDS 150
174/* 151 */
175#define TEGRA124_CLK_DP2 152
176#define TEGRA124_CLK_AMX 153
177#define TEGRA124_CLK_ADX 154
178/* 155 (bit affects dfll_ref and dfll_soc) */
179#define TEGRA124_CLK_XUSB_SS 156
180/* 157 */
181/* 158 */
182/* 159 */
183
184/* 160 */
185/* 161 */
186/* 162 */
187/* 163 */
188/* 164 */
189/* 165 */
190#define TEGRA124_CLK_I2C6 166
191/* 167 */
192/* 168 */
193/* 169 */
194/* 170 */
195#define TEGRA124_CLK_VIM2_CLK 171
196/* 172 */
197/* 173 */
198/* 174 */
199/* 175 */
200#define TEGRA124_CLK_HDMI_AUDIO 176
201#define TEGRA124_CLK_CLK72MHZ 177
202#define TEGRA124_CLK_VIC03 178
203/* 179 */
204#define TEGRA124_CLK_ADX1 180
205#define TEGRA124_CLK_DPAUX 181
206#define TEGRA124_CLK_SOR0 182
207/* 183 */
208#define TEGRA124_CLK_GPU 184
209#define TEGRA124_CLK_AMX1 185
210/* 186 */
211/* 187 */
212/* 188 */
213/* 189 */
214/* 190 */
215/* 191 */
216#define TEGRA124_CLK_UARTB 192
217#define TEGRA124_CLK_VFIR 193
218#define TEGRA124_CLK_SPDIF_IN 194
219#define TEGRA124_CLK_SPDIF_OUT 195
220#define TEGRA124_CLK_VI 196
221#define TEGRA124_CLK_VI_SENSOR 197
222#define TEGRA124_CLK_FUSE 198
223#define TEGRA124_CLK_FUSE_BURN 199
224#define TEGRA124_CLK_CLK_32K 200
225#define TEGRA124_CLK_CLK_M 201
226#define TEGRA124_CLK_CLK_M_DIV2 202
227#define TEGRA124_CLK_CLK_M_DIV4 203
228#define TEGRA124_CLK_PLL_REF 204
229#define TEGRA124_CLK_PLL_C 205
230#define TEGRA124_CLK_PLL_C_OUT1 206
231#define TEGRA124_CLK_PLL_C2 207
232#define TEGRA124_CLK_PLL_C3 208
233#define TEGRA124_CLK_PLL_M 209
234#define TEGRA124_CLK_PLL_M_OUT1 210
235#define TEGRA124_CLK_PLL_P 211
236#define TEGRA124_CLK_PLL_P_OUT1 212
237#define TEGRA124_CLK_PLL_P_OUT2 213
238#define TEGRA124_CLK_PLL_P_OUT3 214
239#define TEGRA124_CLK_PLL_P_OUT4 215
240#define TEGRA124_CLK_PLL_A 216
241#define TEGRA124_CLK_PLL_A_OUT0 217
242#define TEGRA124_CLK_PLL_D 218
243#define TEGRA124_CLK_PLL_D_OUT0 219
244#define TEGRA124_CLK_PLL_D2 220
245#define TEGRA124_CLK_PLL_D2_OUT0 221
246#define TEGRA124_CLK_PLL_U 222
247#define TEGRA124_CLK_PLL_U_480M 223
248
249#define TEGRA124_CLK_PLL_U_60M 224
250#define TEGRA124_CLK_PLL_U_48M 225
251#define TEGRA124_CLK_PLL_U_12M 226
252#define TEGRA124_CLK_PLL_X 227
253#define TEGRA124_CLK_PLL_X_OUT0 228
254#define TEGRA124_CLK_PLL_RE_VCO 229
255#define TEGRA124_CLK_PLL_RE_OUT 230
256#define TEGRA124_CLK_PLL_E 231
257#define TEGRA124_CLK_SPDIF_IN_SYNC 232
258#define TEGRA124_CLK_I2S0_SYNC 233
259#define TEGRA124_CLK_I2S1_SYNC 234
260#define TEGRA124_CLK_I2S2_SYNC 235
261#define TEGRA124_CLK_I2S3_SYNC 236
262#define TEGRA124_CLK_I2S4_SYNC 237
263#define TEGRA124_CLK_VIMCLK_SYNC 238
264#define TEGRA124_CLK_AUDIO0 239
265#define TEGRA124_CLK_AUDIO1 240
266#define TEGRA124_CLK_AUDIO2 241
267#define TEGRA124_CLK_AUDIO3 242
268#define TEGRA124_CLK_AUDIO4 243
269#define TEGRA124_CLK_SPDIF 244
270#define TEGRA124_CLK_CLK_OUT_1 245
271#define TEGRA124_CLK_CLK_OUT_2 246
272#define TEGRA124_CLK_CLK_OUT_3 247
273#define TEGRA124_CLK_BLINK 248
274/* 249 */
275/* 250 */
276/* 251 */
277#define TEGRA124_CLK_XUSB_HOST_SRC 252
278#define TEGRA124_CLK_XUSB_FALCON_SRC 253
279#define TEGRA124_CLK_XUSB_FS_SRC 254
280#define TEGRA124_CLK_XUSB_SS_SRC 255
281
282#define TEGRA124_CLK_XUSB_DEV_SRC 256
283#define TEGRA124_CLK_XUSB_DEV 257
284#define TEGRA124_CLK_XUSB_HS_SRC 258
285#define TEGRA124_CLK_SCLK 259
286#define TEGRA124_CLK_HCLK 260
287#define TEGRA124_CLK_PCLK 261
288#define TEGRA124_CLK_CCLK_G 262
289#define TEGRA124_CLK_CCLK_LP 263
290#define TEGRA124_CLK_DFLL_REF 264
291#define TEGRA124_CLK_DFLL_SOC 265
292#define TEGRA124_CLK_VI_SENSOR2 266
293#define TEGRA124_CLK_PLL_P_OUT5 267
294#define TEGRA124_CLK_CML0 268
295#define TEGRA124_CLK_CML1 269
296#define TEGRA124_CLK_PLL_C4 270
297#define TEGRA124_CLK_PLL_DP 271
298#define TEGRA124_CLK_PLL_E_MUX 272
299/* 273 */
300/* 274 */
301/* 275 */
302/* 276 */
303/* 277 */
304/* 278 */
305/* 279 */
306/* 280 */
307/* 281 */
308/* 282 */
309/* 283 */
310/* 284 */
311/* 285 */
312/* 286 */
313/* 287 */
314
315/* 288 */
316/* 289 */
317/* 290 */
318/* 291 */
319/* 292 */
320/* 293 */
321/* 294 */
322/* 295 */
323/* 296 */
324/* 297 */
325/* 298 */
326/* 299 */
327#define TEGRA124_CLK_AUDIO0_MUX 300
328#define TEGRA124_CLK_AUDIO1_MUX 301
329#define TEGRA124_CLK_AUDIO2_MUX 302
330#define TEGRA124_CLK_AUDIO3_MUX 303
331#define TEGRA124_CLK_AUDIO4_MUX 304
332#define TEGRA124_CLK_SPDIF_MUX 305
333#define TEGRA124_CLK_CLK_OUT_1_MUX 306
334#define TEGRA124_CLK_CLK_OUT_2_MUX 307
335#define TEGRA124_CLK_CLK_OUT_3_MUX 308
336#define TEGRA124_CLK_DSIA_MUX 309
337#define TEGRA124_CLK_DSIB_MUX 310
338#define TEGRA124_CLK_SOR0_LVDS 311
339#define TEGRA124_CLK_XUSB_SS_DIV2 312
340 13
341#define TEGRA124_CLK_PLL_M_UD 313 14#define TEGRA124_CLK_CCLK_G 262
342#define TEGRA124_CLK_PLL_C_UD 314 15#define TEGRA124_CLK_CCLK_LP 263
343 16
344#define TEGRA124_CLK_CLK_MAX 315 17#define TEGRA124_CLK_CLK_MAX 315
345 18
346#endif /* _DT_BINDINGS_CLOCK_TEGRA124_CAR_H */ 19#endif /* _DT_BINDINGS_CLOCK_TEGRA124_CAR_H */
diff --git a/include/dt-bindings/mfd/qcom-rpm.h b/include/dt-bindings/mfd/qcom-rpm.h
new file mode 100644
index 000000000000..388a6f3d6165
--- /dev/null
+++ b/include/dt-bindings/mfd/qcom-rpm.h
@@ -0,0 +1,154 @@
1/*
2 * This header provides constants for the Qualcomm RPM bindings.
3 */
4
5#ifndef _DT_BINDINGS_MFD_QCOM_RPM_H
6#define _DT_BINDINGS_MFD_QCOM_RPM_H
7
8/*
9 * Constants use to identify individual resources in the RPM.
10 */
11#define QCOM_RPM_APPS_FABRIC_ARB 1
12#define QCOM_RPM_APPS_FABRIC_CLK 2
13#define QCOM_RPM_APPS_FABRIC_HALT 3
14#define QCOM_RPM_APPS_FABRIC_IOCTL 4
15#define QCOM_RPM_APPS_FABRIC_MODE 5
16#define QCOM_RPM_APPS_L2_CACHE_CTL 6
17#define QCOM_RPM_CFPB_CLK 7
18#define QCOM_RPM_CXO_BUFFERS 8
19#define QCOM_RPM_CXO_CLK 9
20#define QCOM_RPM_DAYTONA_FABRIC_CLK 10
21#define QCOM_RPM_DDR_DMM 11
22#define QCOM_RPM_EBI1_CLK 12
23#define QCOM_RPM_HDMI_SWITCH 13
24#define QCOM_RPM_MMFPB_CLK 14
25#define QCOM_RPM_MM_FABRIC_ARB 15
26#define QCOM_RPM_MM_FABRIC_CLK 16
27#define QCOM_RPM_MM_FABRIC_HALT 17
28#define QCOM_RPM_MM_FABRIC_IOCTL 18
29#define QCOM_RPM_MM_FABRIC_MODE 19
30#define QCOM_RPM_PLL_4 20
31#define QCOM_RPM_PM8058_LDO0 21
32#define QCOM_RPM_PM8058_LDO1 22
33#define QCOM_RPM_PM8058_LDO2 23
34#define QCOM_RPM_PM8058_LDO3 24
35#define QCOM_RPM_PM8058_LDO4 25
36#define QCOM_RPM_PM8058_LDO5 26
37#define QCOM_RPM_PM8058_LDO6 27
38#define QCOM_RPM_PM8058_LDO7 28
39#define QCOM_RPM_PM8058_LDO8 29
40#define QCOM_RPM_PM8058_LDO9 30
41#define QCOM_RPM_PM8058_LDO10 31
42#define QCOM_RPM_PM8058_LDO11 32
43#define QCOM_RPM_PM8058_LDO12 33
44#define QCOM_RPM_PM8058_LDO13 34
45#define QCOM_RPM_PM8058_LDO14 35
46#define QCOM_RPM_PM8058_LDO15 36
47#define QCOM_RPM_PM8058_LDO16 37
48#define QCOM_RPM_PM8058_LDO17 38
49#define QCOM_RPM_PM8058_LDO18 39
50#define QCOM_RPM_PM8058_LDO19 40
51#define QCOM_RPM_PM8058_LDO20 41
52#define QCOM_RPM_PM8058_LDO21 42
53#define QCOM_RPM_PM8058_LDO22 43
54#define QCOM_RPM_PM8058_LDO23 44
55#define QCOM_RPM_PM8058_LDO24 45
56#define QCOM_RPM_PM8058_LDO25 46
57#define QCOM_RPM_PM8058_LVS0 47
58#define QCOM_RPM_PM8058_LVS1 48
59#define QCOM_RPM_PM8058_NCP 49
60#define QCOM_RPM_PM8058_SMPS0 50
61#define QCOM_RPM_PM8058_SMPS1 51
62#define QCOM_RPM_PM8058_SMPS2 52
63#define QCOM_RPM_PM8058_SMPS3 53
64#define QCOM_RPM_PM8058_SMPS4 54
65#define QCOM_RPM_PM8821_LDO1 55
66#define QCOM_RPM_PM8821_SMPS1 56
67#define QCOM_RPM_PM8821_SMPS2 57
68#define QCOM_RPM_PM8901_LDO0 58
69#define QCOM_RPM_PM8901_LDO1 59
70#define QCOM_RPM_PM8901_LDO2 60
71#define QCOM_RPM_PM8901_LDO3 61
72#define QCOM_RPM_PM8901_LDO4 62
73#define QCOM_RPM_PM8901_LDO5 63
74#define QCOM_RPM_PM8901_LDO6 64
75#define QCOM_RPM_PM8901_LVS0 65
76#define QCOM_RPM_PM8901_LVS1 66
77#define QCOM_RPM_PM8901_LVS2 67
78#define QCOM_RPM_PM8901_LVS3 68
79#define QCOM_RPM_PM8901_MVS 69
80#define QCOM_RPM_PM8901_SMPS0 70
81#define QCOM_RPM_PM8901_SMPS1 71
82#define QCOM_RPM_PM8901_SMPS2 72
83#define QCOM_RPM_PM8901_SMPS3 73
84#define QCOM_RPM_PM8901_SMPS4 74
85#define QCOM_RPM_PM8921_CLK1 75
86#define QCOM_RPM_PM8921_CLK2 76
87#define QCOM_RPM_PM8921_LDO1 77
88#define QCOM_RPM_PM8921_LDO2 78
89#define QCOM_RPM_PM8921_LDO3 79
90#define QCOM_RPM_PM8921_LDO4 80
91#define QCOM_RPM_PM8921_LDO5 81
92#define QCOM_RPM_PM8921_LDO6 82
93#define QCOM_RPM_PM8921_LDO7 83
94#define QCOM_RPM_PM8921_LDO8 84
95#define QCOM_RPM_PM8921_LDO9 85
96#define QCOM_RPM_PM8921_LDO10 86
97#define QCOM_RPM_PM8921_LDO11 87
98#define QCOM_RPM_PM8921_LDO12 88
99#define QCOM_RPM_PM8921_LDO13 89
100#define QCOM_RPM_PM8921_LDO14 90
101#define QCOM_RPM_PM8921_LDO15 91
102#define QCOM_RPM_PM8921_LDO16 92
103#define QCOM_RPM_PM8921_LDO17 93
104#define QCOM_RPM_PM8921_LDO18 94
105#define QCOM_RPM_PM8921_LDO19 95
106#define QCOM_RPM_PM8921_LDO20 96
107#define QCOM_RPM_PM8921_LDO21 97
108#define QCOM_RPM_PM8921_LDO22 98
109#define QCOM_RPM_PM8921_LDO23 99
110#define QCOM_RPM_PM8921_LDO24 100
111#define QCOM_RPM_PM8921_LDO25 101
112#define QCOM_RPM_PM8921_LDO26 102
113#define QCOM_RPM_PM8921_LDO27 103
114#define QCOM_RPM_PM8921_LDO28 104
115#define QCOM_RPM_PM8921_LDO29 105
116#define QCOM_RPM_PM8921_LVS1 106
117#define QCOM_RPM_PM8921_LVS2 107
118#define QCOM_RPM_PM8921_LVS3 108
119#define QCOM_RPM_PM8921_LVS4 109
120#define QCOM_RPM_PM8921_LVS5 110
121#define QCOM_RPM_PM8921_LVS6 111
122#define QCOM_RPM_PM8921_LVS7 112
123#define QCOM_RPM_PM8921_MVS 113
124#define QCOM_RPM_PM8921_NCP 114
125#define QCOM_RPM_PM8921_SMPS1 115
126#define QCOM_RPM_PM8921_SMPS2 116
127#define QCOM_RPM_PM8921_SMPS3 117
128#define QCOM_RPM_PM8921_SMPS4 118
129#define QCOM_RPM_PM8921_SMPS5 119
130#define QCOM_RPM_PM8921_SMPS6 120
131#define QCOM_RPM_PM8921_SMPS7 121
132#define QCOM_RPM_PM8921_SMPS8 122
133#define QCOM_RPM_PXO_CLK 123
134#define QCOM_RPM_QDSS_CLK 124
135#define QCOM_RPM_SFPB_CLK 125
136#define QCOM_RPM_SMI_CLK 126
137#define QCOM_RPM_SYS_FABRIC_ARB 127
138#define QCOM_RPM_SYS_FABRIC_CLK 128
139#define QCOM_RPM_SYS_FABRIC_HALT 129
140#define QCOM_RPM_SYS_FABRIC_IOCTL 130
141#define QCOM_RPM_SYS_FABRIC_MODE 131
142#define QCOM_RPM_USB_OTG_SWITCH 132
143#define QCOM_RPM_VDDMIN_GPIO 133
144
145/*
146 * Constants used to select force mode for regulators.
147 */
148#define QCOM_RPM_FORCE_MODE_NONE 0
149#define QCOM_RPM_FORCE_MODE_LPM 1
150#define QCOM_RPM_FORCE_MODE_HPM 2
151#define QCOM_RPM_FORCE_MODE_AUTO 3
152#define QCOM_RPM_FORCE_MODE_BYPASS 4
153
154#endif
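
For reference, these identifiers are shared between device tree sources and C code, so they behave as plain integers on both sides. A minimal C sketch (the name table below is purely illustrative, not part of the binding):

#include <dt-bindings/mfd/qcom-rpm.h>

/* Hypothetical lookup table: RPM resource ID -> printable name. */
static const char * const qcom_rpm_resource_names[] = {
        [QCOM_RPM_CXO_CLK]      = "cxo_clk",
        [QCOM_RPM_PM8921_SMPS1] = "pm8921_smps1",
        [QCOM_RPM_PM8921_LDO24] = "pm8921_ldo24",
};
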
diff --git a/include/linux/clk/sunxi.h b/include/dt-bindings/thermal/thermal_exynos.h
index aed28c4451d9..0646500bca69 100644
--- a/include/linux/clk/sunxi.h
+++ b/include/dt-bindings/thermal/thermal_exynos.h
@@ -1,5 +1,8 @@
1/* 1/*
2 * Copyright 2013 - Hans de Goede <hdegoede@redhat.com> 2 * thermal_exynos.h - Samsung EXYNOS TMU device tree definitions
3 *
4 * Copyright (C) 2014 Samsung Electronics
5 * Lukasz Majewski <l.majewski@samsung.com>
3 * 6 *
4 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -10,13 +13,16 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 *
13 */ 17 */
14 18
15#ifndef __LINUX_CLK_SUNXI_H_ 19#ifndef _EXYNOS_THERMAL_TMU_DT_H
16#define __LINUX_CLK_SUNXI_H_ 20#define _EXYNOS_THERMAL_TMU_DT_H
17
18#include <linux/clk.h>
19 21
20void clk_sunxi_mmc_phase_control(struct clk *clk, u8 sample, u8 output); 22#define TYPE_ONE_POINT_TRIMMING 0
23#define TYPE_ONE_POINT_TRIMMING_25 1
24#define TYPE_ONE_POINT_TRIMMING_85 2
25#define TYPE_TWO_POINT_TRIMMING 3
26#define TYPE_NONE 4
21 27
22#endif 28#endif /* _EXYNOS_THERMAL_TMU_DT_H */
diff --git a/include/linux/bcm47xx_wdt.h b/include/linux/bcm47xx_wdt.h
index b708786d4cbf..5582c211f594 100644
--- a/include/linux/bcm47xx_wdt.h
+++ b/include/linux/bcm47xx_wdt.h
@@ -16,6 +16,7 @@ struct bcm47xx_wdt {
16 16
17 struct watchdog_device wdd; 17 struct watchdog_device wdd;
18 struct notifier_block notifier; 18 struct notifier_block notifier;
19 struct notifier_block restart_handler;
19 20
20 struct timer_list soft_timer; 21 struct timer_list soft_timer;
21 atomic_t soft_ticks; 22 atomic_t soft_ticks;
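
The new restart_handler field suggests the driver now participates in the kernel's restart-handler chain. A hedged sketch of the registration pattern; register_restart_handler() is the stock API from <linux/reboot.h>, while the timer_set() callback and its use here are assumptions about this driver:

#include <linux/notifier.h>
#include <linux/reboot.h>

static int bcm47xx_wdt_restart(struct notifier_block *nb,
                               unsigned long action, void *data)
{
        struct bcm47xx_wdt *wdt =
                container_of(nb, struct bcm47xx_wdt, restart_handler);

        /* Assumed: arm the hardware with a minimal timeout so the SoC
         * resets almost immediately. */
        wdt->timer_set(wdt, 1);
        return NOTIFY_DONE;
}

/* in probe(): */
wdt->restart_handler.notifier_call = bcm47xx_wdt_restart;
register_restart_handler(&wdt->restart_handler);
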
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index c0dadaac26e3..31eb03d0c766 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -158,17 +158,6 @@ enum {
158}; 158};
159 159
160 160
161/* pool operations */
162enum {
163 POOL_OP_CREATE = 0x01,
164 POOL_OP_DELETE = 0x02,
165 POOL_OP_AUID_CHANGE = 0x03,
166 POOL_OP_CREATE_SNAP = 0x11,
167 POOL_OP_DELETE_SNAP = 0x12,
168 POOL_OP_CREATE_UNMANAGED_SNAP = 0x21,
169 POOL_OP_DELETE_UNMANAGED_SNAP = 0x22,
170};
171
172struct ceph_mon_request_header { 161struct ceph_mon_request_header {
173 __le64 have_version; 162 __le64 have_version;
174 __le16 session_mon; 163 __le16 session_mon;
@@ -191,31 +180,6 @@ struct ceph_mon_statfs_reply {
191 struct ceph_statfs st; 180 struct ceph_statfs st;
192} __attribute__ ((packed)); 181} __attribute__ ((packed));
193 182
194const char *ceph_pool_op_name(int op);
195
196struct ceph_mon_poolop {
197 struct ceph_mon_request_header monhdr;
198 struct ceph_fsid fsid;
199 __le32 pool;
200 __le32 op;
201 __le64 auid;
202 __le64 snapid;
203 __le32 name_len;
204} __attribute__ ((packed));
205
206struct ceph_mon_poolop_reply {
207 struct ceph_mon_request_header monhdr;
208 struct ceph_fsid fsid;
209 __le32 reply_code;
210 __le32 epoch;
211 char has_data;
212 char data[0];
213} __attribute__ ((packed));
214
215struct ceph_mon_unmanaged_snap {
216 __le64 snapid;
217} __attribute__ ((packed));
218
219struct ceph_osd_getmap { 183struct ceph_osd_getmap {
220 struct ceph_mon_request_header monhdr; 184 struct ceph_mon_request_header monhdr;
221 struct ceph_fsid fsid; 185 struct ceph_fsid fsid;
@@ -307,6 +271,7 @@ enum {
307 CEPH_SESSION_RECALL_STATE, 271 CEPH_SESSION_RECALL_STATE,
308 CEPH_SESSION_FLUSHMSG, 272 CEPH_SESSION_FLUSHMSG,
309 CEPH_SESSION_FLUSHMSG_ACK, 273 CEPH_SESSION_FLUSHMSG_ACK,
274 CEPH_SESSION_FORCE_RO,
310}; 275};
311 276
312extern const char *ceph_session_op_name(int op); 277extern const char *ceph_session_op_name(int op);
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 8b11a79ca1cb..16fff9608848 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -30,8 +30,9 @@
30#define CEPH_OPT_MYIP (1<<2) /* specified my ip */ 30#define CEPH_OPT_MYIP (1<<2) /* specified my ip */
31#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */ 31#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */
32#define CEPH_OPT_NOMSGAUTH (1<<4) /* not require cephx message signature */ 32#define CEPH_OPT_NOMSGAUTH (1<<4) /* not require cephx message signature */
33#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */
33 34
34#define CEPH_OPT_DEFAULT (0) 35#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY)
35 36
36#define ceph_set_opt(client, opt) \ 37#define ceph_set_opt(client, opt) \
37 (client)->options->flags |= CEPH_OPT_##opt; 38 (client)->options->flags |= CEPH_OPT_##opt;
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index d9d396c16503..e15499422fdc 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -57,6 +57,7 @@ struct ceph_messenger {
57 57
58 atomic_t stopping; 58 atomic_t stopping;
59 bool nocrc; 59 bool nocrc;
60 bool tcp_nodelay;
60 61
61 /* 62 /*
62 * the global_seq counts connections I (attempt to) initiate 63 * the global_seq counts connections I (attempt to) initiate
@@ -264,7 +265,8 @@ extern void ceph_messenger_init(struct ceph_messenger *msgr,
264 struct ceph_entity_addr *myaddr, 265 struct ceph_entity_addr *myaddr,
265 u64 supported_features, 266 u64 supported_features,
266 u64 required_features, 267 u64 required_features,
267 bool nocrc); 268 bool nocrc,
269 bool tcp_nodelay);
268 270
269extern void ceph_con_init(struct ceph_connection *con, void *private, 271extern void ceph_con_init(struct ceph_connection *con, void *private,
270 const struct ceph_connection_operations *ops, 272 const struct ceph_connection_operations *ops,
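
With the extra tcp_nodelay parameter, the messenger's caller presumably forwards the new client option; a sketch of the updated call site, using the existing ceph_test_opt() accessor from libceph.h:

/* net/ceph/ceph_common.c, sketch of the updated call: */
ceph_messenger_init(&client->msgr, myaddr,
                    client->supported_features,
                    client->required_features,
                    ceph_test_opt(client, NOCRC),
                    ceph_test_opt(client, TCP_NODELAY));
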
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index deb47e45ac7c..81810dc21f06 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -40,7 +40,7 @@ struct ceph_mon_request {
40}; 40};
41 41
42/* 42/*
43 * ceph_mon_generic_request is being used for the statfs, poolop and 43 * ceph_mon_generic_request is being used for the statfs and
44 * mon_get_version requests which are being done a bit differently 44 * mon_get_version requests which are being done a bit differently
45 * because we need to get data back to the caller 45 * because we need to get data back to the caller
46 */ 46 */
@@ -50,7 +50,6 @@ struct ceph_mon_generic_request {
50 struct rb_node node; 50 struct rb_node node;
51 int result; 51 int result;
52 void *buf; 52 void *buf;
53 int buf_len;
54 struct completion completion; 53 struct completion completion;
55 struct ceph_msg *request; /* original request */ 54 struct ceph_msg *request; /* original request */
56 struct ceph_msg *reply; /* and reply */ 55 struct ceph_msg *reply; /* and reply */
@@ -117,10 +116,4 @@ extern int ceph_monc_open_session(struct ceph_mon_client *monc);
117 116
118extern int ceph_monc_validate_auth(struct ceph_mon_client *monc); 117extern int ceph_monc_validate_auth(struct ceph_mon_client *monc);
119 118
120extern int ceph_monc_create_snapid(struct ceph_mon_client *monc,
121 u32 pool, u64 *snapid);
122
123extern int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
124 u32 pool, u64 snapid);
125
126#endif 119#endif
diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h
deleted file mode 100644
index 0ca5f6046920..000000000000
--- a/include/linux/clk-private.h
+++ /dev/null
@@ -1,220 +0,0 @@
1/*
2 * linux/include/linux/clk-private.h
3 *
4 * Copyright (c) 2010-2011 Jeremy Kerr <jeremy.kerr@canonical.com>
5 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#ifndef __LINUX_CLK_PRIVATE_H
12#define __LINUX_CLK_PRIVATE_H
13
14#include <linux/clk-provider.h>
15#include <linux/kref.h>
16#include <linux/list.h>
17
18/*
19 * WARNING: Do not include clk-private.h from any file that implements struct
20 * clk_ops. Doing so is a layering violation!
21 *
22 * This header exists only to allow for statically initialized clock data. Any
23 * static clock data must be defined in a separate file from the logic that
24 * implements the clock operations for that same data.
25 */
26
27#ifdef CONFIG_COMMON_CLK
28
29struct module;
30
31struct clk {
32 const char *name;
33 const struct clk_ops *ops;
34 struct clk_hw *hw;
35 struct module *owner;
36 struct clk *parent;
37 const char **parent_names;
38 struct clk **parents;
39 u8 num_parents;
40 u8 new_parent_index;
41 unsigned long rate;
42 unsigned long new_rate;
43 struct clk *new_parent;
44 struct clk *new_child;
45 unsigned long flags;
46 unsigned int enable_count;
47 unsigned int prepare_count;
48 unsigned long accuracy;
49 int phase;
50 struct hlist_head children;
51 struct hlist_node child_node;
52 struct hlist_node debug_node;
53 unsigned int notifier_count;
54#ifdef CONFIG_DEBUG_FS
55 struct dentry *dentry;
56#endif
57 struct kref ref;
58};
59
60/*
61 * DOC: Basic clock implementations common to many platforms
62 *
63 * Each basic clock hardware type is comprised of a structure describing the
64 * clock hardware, implementations of the relevant callbacks in struct clk_ops,
65 * unique flags for that hardware type, a registration function and an
66 * alternative macro for static initialization
67 */
68
69#define DEFINE_CLK(_name, _ops, _flags, _parent_names, \
70 _parents) \
71 static struct clk _name = { \
72 .name = #_name, \
73 .ops = &_ops, \
74 .hw = &_name##_hw.hw, \
75 .parent_names = _parent_names, \
76 .num_parents = ARRAY_SIZE(_parent_names), \
77 .parents = _parents, \
78 .flags = _flags | CLK_IS_BASIC, \
79 }
80
81#define DEFINE_CLK_FIXED_RATE(_name, _flags, _rate, \
82 _fixed_rate_flags) \
83 static struct clk _name; \
84 static const char *_name##_parent_names[] = {}; \
85 static struct clk_fixed_rate _name##_hw = { \
86 .hw = { \
87 .clk = &_name, \
88 }, \
89 .fixed_rate = _rate, \
90 .flags = _fixed_rate_flags, \
91 }; \
92 DEFINE_CLK(_name, clk_fixed_rate_ops, _flags, \
93 _name##_parent_names, NULL);
94
95#define DEFINE_CLK_GATE(_name, _parent_name, _parent_ptr, \
96 _flags, _reg, _bit_idx, \
97 _gate_flags, _lock) \
98 static struct clk _name; \
99 static const char *_name##_parent_names[] = { \
100 _parent_name, \
101 }; \
102 static struct clk *_name##_parents[] = { \
103 _parent_ptr, \
104 }; \
105 static struct clk_gate _name##_hw = { \
106 .hw = { \
107 .clk = &_name, \
108 }, \
109 .reg = _reg, \
110 .bit_idx = _bit_idx, \
111 .flags = _gate_flags, \
112 .lock = _lock, \
113 }; \
114 DEFINE_CLK(_name, clk_gate_ops, _flags, \
115 _name##_parent_names, _name##_parents);
116
117#define _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \
118 _flags, _reg, _shift, _width, \
119 _divider_flags, _table, _lock) \
120 static struct clk _name; \
121 static const char *_name##_parent_names[] = { \
122 _parent_name, \
123 }; \
124 static struct clk *_name##_parents[] = { \
125 _parent_ptr, \
126 }; \
127 static struct clk_divider _name##_hw = { \
128 .hw = { \
129 .clk = &_name, \
130 }, \
131 .reg = _reg, \
132 .shift = _shift, \
133 .width = _width, \
134 .flags = _divider_flags, \
135 .table = _table, \
136 .lock = _lock, \
137 }; \
138 DEFINE_CLK(_name, clk_divider_ops, _flags, \
139 _name##_parent_names, _name##_parents);
140
141#define DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \
142 _flags, _reg, _shift, _width, \
143 _divider_flags, _lock) \
144 _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \
145 _flags, _reg, _shift, _width, \
146 _divider_flags, NULL, _lock)
147
148#define DEFINE_CLK_DIVIDER_TABLE(_name, _parent_name, \
149 _parent_ptr, _flags, _reg, \
150 _shift, _width, _divider_flags, \
151 _table, _lock) \
152 _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \
153 _flags, _reg, _shift, _width, \
154 _divider_flags, _table, _lock) \
155
156#define DEFINE_CLK_MUX(_name, _parent_names, _parents, _flags, \
157 _reg, _shift, _width, \
158 _mux_flags, _lock) \
159 static struct clk _name; \
160 static struct clk_mux _name##_hw = { \
161 .hw = { \
162 .clk = &_name, \
163 }, \
164 .reg = _reg, \
165 .shift = _shift, \
166 .mask = BIT(_width) - 1, \
167 .flags = _mux_flags, \
168 .lock = _lock, \
169 }; \
170 DEFINE_CLK(_name, clk_mux_ops, _flags, _parent_names, \
171 _parents);
172
173#define DEFINE_CLK_FIXED_FACTOR(_name, _parent_name, \
174 _parent_ptr, _flags, \
175 _mult, _div) \
176 static struct clk _name; \
177 static const char *_name##_parent_names[] = { \
178 _parent_name, \
179 }; \
180 static struct clk *_name##_parents[] = { \
181 _parent_ptr, \
182 }; \
183 static struct clk_fixed_factor _name##_hw = { \
184 .hw = { \
185 .clk = &_name, \
186 }, \
187 .mult = _mult, \
188 .div = _div, \
189 }; \
190 DEFINE_CLK(_name, clk_fixed_factor_ops, _flags, \
191 _name##_parent_names, _name##_parents);
192
193/**
194 * __clk_init - initialize the data structures in a struct clk
195 * @dev: device initializing this clk, placeholder for now
196 * @clk: clk being initialized
197 *
198 * Initializes the lists in struct clk, queries the hardware for the
199 * parent and rate and sets them both.
200 *
201 * Any struct clk passed into __clk_init must have the following members
202 * populated:
203 * .name
204 * .ops
205 * .hw
206 * .parent_names
207 * .num_parents
208 * .flags
209 *
210 * It is not necessary to call clk_register if __clk_init is used directly with
211 * statically initialized clock data.
212 *
213 * Returns 0 on success, otherwise an error code.
214 */
215int __clk_init(struct device *dev, struct clk *clk);
216
217struct clk *__clk_register(struct device *dev, struct clk_hw *hw);
218
219#endif /* CONFIG_COMMON_CLK */
220#endif /* CLK_PRIVATE_H */
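
With clk-private.h gone, statically initialized clock data (the DEFINE_CLK_* macros above) has no replacement; clocks must go through the dynamic registration path that clk-provider.h already offers. A minimal sketch with hypothetical names:

#include <linux/clk-provider.h>

/* Replaces a DEFINE_CLK_FIXED_RATE() instance: register at init time. */
static void __init foo_clk_setup(void)
{
        struct clk *clk;

        clk = clk_register_fixed_rate(NULL, "foo_osc", NULL,
                                      CLK_IS_ROOT, 24000000);
        if (IS_ERR(clk))
                pr_err("failed to register foo_osc: %ld\n", PTR_ERR(clk));
}
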
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index d936409520f8..5591ea71a8d1 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -33,6 +33,7 @@
33#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */ 33#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
34 34
35struct clk_hw; 35struct clk_hw;
36struct clk_core;
36struct dentry; 37struct dentry;
37 38
38/** 39/**
@@ -174,9 +175,12 @@ struct clk_ops {
174 unsigned long parent_rate); 175 unsigned long parent_rate);
175 long (*round_rate)(struct clk_hw *hw, unsigned long rate, 176 long (*round_rate)(struct clk_hw *hw, unsigned long rate,
176 unsigned long *parent_rate); 177 unsigned long *parent_rate);
177 long (*determine_rate)(struct clk_hw *hw, unsigned long rate, 178 long (*determine_rate)(struct clk_hw *hw,
178 unsigned long *best_parent_rate, 179 unsigned long rate,
179 struct clk_hw **best_parent_hw); 180 unsigned long min_rate,
181 unsigned long max_rate,
182 unsigned long *best_parent_rate,
183 struct clk_hw **best_parent_hw);
180 int (*set_parent)(struct clk_hw *hw, u8 index); 184 int (*set_parent)(struct clk_hw *hw, u8 index);
181 u8 (*get_parent)(struct clk_hw *hw); 185 u8 (*get_parent)(struct clk_hw *hw);
182 int (*set_rate)(struct clk_hw *hw, unsigned long rate, 186 int (*set_rate)(struct clk_hw *hw, unsigned long rate,
@@ -216,13 +220,17 @@ struct clk_init_data {
216 * clk_foo and then referenced by the struct clk instance that uses struct 220 * clk_foo and then referenced by the struct clk instance that uses struct
217 * clk_foo's clk_ops 221 * clk_foo's clk_ops
218 * 222 *
219 * @clk: pointer to the struct clk instance that points back to this struct 223 * @core: pointer to the struct clk_core instance that points back to this
220 * clk_hw instance 224 * struct clk_hw instance
225 *
226 * @clk: pointer to the per-user struct clk instance that can be used to call
227 * into the clk API
221 * 228 *
222 * @init: pointer to struct clk_init_data that contains the init data shared 229 * @init: pointer to struct clk_init_data that contains the init data shared
223 * with the common clock framework. 230 * with the common clock framework.
224 */ 231 */
225struct clk_hw { 232struct clk_hw {
233 struct clk_core *core;
226 struct clk *clk; 234 struct clk *clk;
227 const struct clk_init_data *init; 235 const struct clk_init_data *init;
228}; 236};
@@ -294,6 +302,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
294 const char *parent_name, unsigned long flags, 302 const char *parent_name, unsigned long flags,
295 void __iomem *reg, u8 bit_idx, 303 void __iomem *reg, u8 bit_idx,
296 u8 clk_gate_flags, spinlock_t *lock); 304 u8 clk_gate_flags, spinlock_t *lock);
305void clk_unregister_gate(struct clk *clk);
297 306
298struct clk_div_table { 307struct clk_div_table {
299 unsigned int val; 308 unsigned int val;
@@ -352,6 +361,17 @@ struct clk_divider {
352#define CLK_DIVIDER_READ_ONLY BIT(5) 361#define CLK_DIVIDER_READ_ONLY BIT(5)
353 362
354extern const struct clk_ops clk_divider_ops; 363extern const struct clk_ops clk_divider_ops;
364
365unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
366 unsigned int val, const struct clk_div_table *table,
367 unsigned long flags);
368long divider_round_rate(struct clk_hw *hw, unsigned long rate,
369 unsigned long *prate, const struct clk_div_table *table,
370 u8 width, unsigned long flags);
371int divider_get_val(unsigned long rate, unsigned long parent_rate,
372 const struct clk_div_table *table, u8 width,
373 unsigned long flags);
374
355struct clk *clk_register_divider(struct device *dev, const char *name, 375struct clk *clk_register_divider(struct device *dev, const char *name,
356 const char *parent_name, unsigned long flags, 376 const char *parent_name, unsigned long flags,
357 void __iomem *reg, u8 shift, u8 width, 377 void __iomem *reg, u8 shift, u8 width,
@@ -361,6 +381,7 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
361 void __iomem *reg, u8 shift, u8 width, 381 void __iomem *reg, u8 shift, u8 width,
362 u8 clk_divider_flags, const struct clk_div_table *table, 382 u8 clk_divider_flags, const struct clk_div_table *table,
363 spinlock_t *lock); 383 spinlock_t *lock);
384void clk_unregister_divider(struct clk *clk);
364 385
365/** 386/**
366 * struct clk_mux - multiplexer clock 387 * struct clk_mux - multiplexer clock
@@ -382,6 +403,8 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
382 * register, and mask of mux bits are in higher 16-bit of this register. 403 * register, and mask of mux bits are in higher 16-bit of this register.
383 * While setting the mux bits, higher 16-bit should also be updated to 404 * While setting the mux bits, higher 16-bit should also be updated to
384 * indicate changing mux bits. 405 * indicate changing mux bits.
406 * CLK_MUX_ROUND_CLOSEST - Use the parent rate that is closest to the desired
407 * frequency.
385 */ 408 */
386struct clk_mux { 409struct clk_mux {
387 struct clk_hw hw; 410 struct clk_hw hw;
@@ -396,7 +419,8 @@ struct clk_mux {
396#define CLK_MUX_INDEX_ONE BIT(0) 419#define CLK_MUX_INDEX_ONE BIT(0)
397#define CLK_MUX_INDEX_BIT BIT(1) 420#define CLK_MUX_INDEX_BIT BIT(1)
398#define CLK_MUX_HIWORD_MASK BIT(2) 421#define CLK_MUX_HIWORD_MASK BIT(2)
399#define CLK_MUX_READ_ONLY BIT(3) /* mux setting cannot be changed */ 422#define CLK_MUX_READ_ONLY BIT(3) /* mux can't be changed */
423#define CLK_MUX_ROUND_CLOSEST BIT(4)
400 424
401extern const struct clk_ops clk_mux_ops; 425extern const struct clk_ops clk_mux_ops;
402extern const struct clk_ops clk_mux_ro_ops; 426extern const struct clk_ops clk_mux_ro_ops;
@@ -411,6 +435,8 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
411 void __iomem *reg, u8 shift, u32 mask, 435 void __iomem *reg, u8 shift, u32 mask,
412 u8 clk_mux_flags, u32 *table, spinlock_t *lock); 436 u8 clk_mux_flags, u32 *table, spinlock_t *lock);
413 437
438void clk_unregister_mux(struct clk *clk);
439
414void of_fixed_factor_clk_setup(struct device_node *node); 440void of_fixed_factor_clk_setup(struct device_node *node);
415 441
416/** 442/**
@@ -550,15 +576,29 @@ bool __clk_is_prepared(struct clk *clk);
550bool __clk_is_enabled(struct clk *clk); 576bool __clk_is_enabled(struct clk *clk);
551struct clk *__clk_lookup(const char *name); 577struct clk *__clk_lookup(const char *name);
552long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate, 578long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
579 unsigned long min_rate,
580 unsigned long max_rate,
553 unsigned long *best_parent_rate, 581 unsigned long *best_parent_rate,
554 struct clk_hw **best_parent_p); 582 struct clk_hw **best_parent_p);
583unsigned long __clk_determine_rate(struct clk_hw *core,
584 unsigned long rate,
585 unsigned long min_rate,
586 unsigned long max_rate);
587long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
588 unsigned long min_rate,
589 unsigned long max_rate,
590 unsigned long *best_parent_rate,
591 struct clk_hw **best_parent_p);
592
593static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
594{
595 dst->clk = src->clk;
596 dst->core = src->core;
597}
555 598
556/* 599/*
557 * FIXME clock api without lock protection 600 * FIXME clock api without lock protection
558 */ 601 */
559int __clk_prepare(struct clk *clk);
560void __clk_unprepare(struct clk *clk);
561void __clk_reparent(struct clk *clk, struct clk *new_parent);
562unsigned long __clk_round_rate(struct clk *clk, unsigned long rate); 602unsigned long __clk_round_rate(struct clk *clk, unsigned long rate);
563 603
564struct of_device_id; 604struct of_device_id;
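
The .determine_rate callback now receives the clock's permitted rate range. A sketch of a mux driver adapting to the new signature by delegating to the updated generic helper (foo_* names are hypothetical):

#include <linux/clk-provider.h>

static long foo_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
                                   unsigned long min_rate,
                                   unsigned long max_rate,
                                   unsigned long *best_parent_rate,
                                   struct clk_hw **best_parent_hw)
{
        /* Pass the new range straight through to the generic helper. */
        return __clk_mux_determine_rate(hw, rate, min_rate, max_rate,
                                        best_parent_rate, best_parent_hw);
}

static const struct clk_ops foo_mux_ops = {
        /* .get_parent/.set_parent elided */
        .determine_rate = foo_mux_determine_rate,
};
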
diff --git a/include/linux/clk.h b/include/linux/clk.h
index c7f258a81761..8381bbfbc308 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -302,6 +302,46 @@ long clk_round_rate(struct clk *clk, unsigned long rate);
302int clk_set_rate(struct clk *clk, unsigned long rate); 302int clk_set_rate(struct clk *clk, unsigned long rate);
303 303
304/** 304/**
305 * clk_has_parent - check if a clock is a possible parent for another
306 * @clk: clock source
307 * @parent: parent clock source
308 *
309 * This function can be used in drivers that need to check that a clock can be
310 * the parent of another without actually changing the parent.
311 *
312 * Returns true if @parent is a possible parent for @clk, false otherwise.
313 */
314bool clk_has_parent(struct clk *clk, struct clk *parent);
315
316/**
317 * clk_set_rate_range - set a rate range for a clock source
318 * @clk: clock source
319 * @min: desired minimum clock rate in Hz, inclusive
320 * @max: desired maximum clock rate in Hz, inclusive
321 *
322 * Returns success (0) or negative errno.
323 */
324int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);
325
326/**
327 * clk_set_min_rate - set a minimum clock rate for a clock source
328 * @clk: clock source
329 * @rate: desired minimum clock rate in Hz, inclusive
330 *
331 * Returns success (0) or negative errno.
332 */
333int clk_set_min_rate(struct clk *clk, unsigned long rate);
334
335/**
336 * clk_set_max_rate - set a maximum clock rate for a clock source
337 * @clk: clock source
338 * @rate: desired maximum clock rate in Hz, inclusive
339 *
340 * Returns success (0) or negative errno.
341 */
342int clk_set_max_rate(struct clk *clk, unsigned long rate);
343
344/**
305 * clk_set_parent - set the parent clock source for this clock 345 * clk_set_parent - set the parent clock source for this clock
306 * @clk: clock source 346 * @clk: clock source
307 * @parent: parent clock source 347 * @parent: parent clock source
@@ -374,6 +414,11 @@ static inline long clk_round_rate(struct clk *clk, unsigned long rate)
374 return 0; 414 return 0;
375} 415}
376 416
417static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
418{
419 return true;
420}
421
377static inline int clk_set_parent(struct clk *clk, struct clk *parent) 422static inline int clk_set_parent(struct clk *clk, struct clk *parent)
378{ 423{
379 return 0; 424 return 0;
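
On the consumer side, the new range calls let independent users constrain a shared clock, with the framework combining the requested ranges. A sketch, assuming a hypothetical driver that needs 100-400 MHz:

#include <linux/clk.h>

int foo_enable_fast_mode(struct foo_device *foo)  /* foo_device is hypothetical */
{
        int ret;

        /* Constrain the shared clock; other users' ranges are combined
         * by the framework before a rate is chosen. */
        ret = clk_set_rate_range(foo->clk, 100000000, 400000000);
        if (ret)
                return ret;

        return clk_set_rate(foo->clk, 400000000);
}
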
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index 3ca9fca827a2..19c4208f4752 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -120,6 +120,4 @@ static inline void tegra_cpu_clock_resume(void)
120} 120}
121#endif 121#endif
122 122
123void tegra_clocks_apply_init_table(void);
124
125#endif /* __LINUX_CLK_TEGRA_H_ */ 123#endif /* __LINUX_CLK_TEGRA_H_ */
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 55ef529a0dbf..67844003493d 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -15,6 +15,7 @@
15#ifndef __LINUX_CLK_TI_H__ 15#ifndef __LINUX_CLK_TI_H__
16#define __LINUX_CLK_TI_H__ 16#define __LINUX_CLK_TI_H__
17 17
18#include <linux/clk-provider.h>
18#include <linux/clkdev.h> 19#include <linux/clkdev.h>
19 20
20/** 21/**
@@ -217,6 +218,13 @@ struct ti_dt_clk {
217/* Maximum number of clock memmaps */ 218/* Maximum number of clock memmaps */
218#define CLK_MAX_MEMMAPS 4 219#define CLK_MAX_MEMMAPS 4
219 220
221/* Static memmap indices */
222enum {
223 TI_CLKM_CM = 0,
224 TI_CLKM_PRM,
225 TI_CLKM_SCRM,
226};
227
220typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *); 228typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);
221 229
222/** 230/**
@@ -263,6 +271,8 @@ int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
263 u8 index); 271 u8 index);
264long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, 272long omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
265 unsigned long rate, 273 unsigned long rate,
274 unsigned long min_rate,
275 unsigned long max_rate,
266 unsigned long *best_parent_rate, 276 unsigned long *best_parent_rate,
267 struct clk_hw **best_parent_clk); 277 struct clk_hw **best_parent_clk);
268unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw, 278unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
@@ -272,6 +282,8 @@ long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
272 unsigned long *parent_rate); 282 unsigned long *parent_rate);
273long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, 283long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
274 unsigned long rate, 284 unsigned long rate,
285 unsigned long min_rate,
286 unsigned long max_rate,
275 unsigned long *best_parent_rate, 287 unsigned long *best_parent_rate,
276 struct clk_hw **best_parent_clk); 288 struct clk_hw **best_parent_clk);
277u8 omap2_init_dpll_parent(struct clk_hw *hw); 289u8 omap2_init_dpll_parent(struct clk_hw *hw);
@@ -348,4 +360,17 @@ extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait;
348extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait; 360extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait;
349extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait; 361extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait;
350 362
363#ifdef CONFIG_ATAGS
364int omap3430_clk_legacy_init(void);
365int omap3430es1_clk_legacy_init(void);
366int omap36xx_clk_legacy_init(void);
367int am35xx_clk_legacy_init(void);
368#else
369static inline int omap3430_clk_legacy_init(void) { return -ENXIO; }
370static inline int omap3430es1_clk_legacy_init(void) { return -ENXIO; }
371static inline int omap36xx_clk_legacy_init(void) { return -ENXIO; }
372static inline int am35xx_clk_legacy_init(void) { return -ENXIO; }
373#endif
374
375
351#endif 376#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index d1ec10a940ff..1b45e4a0519b 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -202,7 +202,7 @@ static __always_inline void data_access_exceeds_word_size(void)
202{ 202{
203} 203}
204 204
205static __always_inline void __read_once_size(volatile void *p, void *res, int size) 205static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
206{ 206{
207 switch (size) { 207 switch (size) {
208 case 1: *(__u8 *)res = *(volatile __u8 *)p; break; 208 case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
@@ -259,10 +259,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
259 */ 259 */
260 260
261#define READ_ONCE(x) \ 261#define READ_ONCE(x) \
262 ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; }) 262 ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
263 263
264#define WRITE_ONCE(x, val) \ 264#define WRITE_ONCE(x, val) \
265 ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; }) 265 ({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; })
266 266
267#endif /* __KERNEL__ */ 267#endif /* __KERNEL__ */
268 268
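
The rewritten READ_ONCE() copies through a char array inside a union, so it also works for non-scalar types and guarantees the load happens exactly once, inside __read_once_size(). Typical lockless publish/consume usage, as a sketch with hypothetical names:

/* One writer publishes, many lockless readers poll. */
struct foo_state {
        int seq;
};

static struct foo_state *cur_state;

static int foo_read_seq(void)
{
        /* Single, non-reloaded load of the pointer. */
        struct foo_state *s = READ_ONCE(cur_state);

        return s ? READ_ONCE(s->seq) : -1;
}

static void foo_publish(struct foo_state *s)
{
        WRITE_ONCE(cur_state, s);
}
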
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 92c08cf7670e..d8358799c594 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -215,13 +215,16 @@ struct dentry_operations {
215#define DCACHE_LRU_LIST 0x00080000 215#define DCACHE_LRU_LIST 0x00080000
216 216
217#define DCACHE_ENTRY_TYPE 0x00700000 217#define DCACHE_ENTRY_TYPE 0x00700000
218#define DCACHE_MISS_TYPE 0x00000000 /* Negative dentry */ 218#define DCACHE_MISS_TYPE 0x00000000 /* Negative dentry (maybe fallthru to nowhere) */
219#define DCACHE_DIRECTORY_TYPE 0x00100000 /* Normal directory */ 219#define DCACHE_WHITEOUT_TYPE 0x00100000 /* Whiteout dentry (stop pathwalk) */
220#define DCACHE_AUTODIR_TYPE 0x00200000 /* Lookupless directory (presumed automount) */ 220#define DCACHE_DIRECTORY_TYPE 0x00200000 /* Normal directory */
221#define DCACHE_SYMLINK_TYPE 0x00300000 /* Symlink */ 221#define DCACHE_AUTODIR_TYPE 0x00300000 /* Lookupless directory (presumed automount) */
222#define DCACHE_FILE_TYPE 0x00400000 /* Other file type */ 222#define DCACHE_REGULAR_TYPE 0x00400000 /* Regular file type (or fallthru to such) */
223#define DCACHE_SPECIAL_TYPE 0x00500000 /* Other file type (or fallthru to such) */
224#define DCACHE_SYMLINK_TYPE 0x00600000 /* Symlink (or fallthru to such) */
223 225
224#define DCACHE_MAY_FREE 0x00800000 226#define DCACHE_MAY_FREE 0x00800000
227#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
225 228
226extern seqlock_t rename_lock; 229extern seqlock_t rename_lock;
227 230
@@ -423,6 +426,16 @@ static inline unsigned __d_entry_type(const struct dentry *dentry)
423 return dentry->d_flags & DCACHE_ENTRY_TYPE; 426 return dentry->d_flags & DCACHE_ENTRY_TYPE;
424} 427}
425 428
429static inline bool d_is_miss(const struct dentry *dentry)
430{
431 return __d_entry_type(dentry) == DCACHE_MISS_TYPE;
432}
433
434static inline bool d_is_whiteout(const struct dentry *dentry)
435{
436 return __d_entry_type(dentry) == DCACHE_WHITEOUT_TYPE;
437}
438
426static inline bool d_can_lookup(const struct dentry *dentry) 439static inline bool d_can_lookup(const struct dentry *dentry)
427{ 440{
428 return __d_entry_type(dentry) == DCACHE_DIRECTORY_TYPE; 441 return __d_entry_type(dentry) == DCACHE_DIRECTORY_TYPE;
@@ -443,14 +456,25 @@ static inline bool d_is_symlink(const struct dentry *dentry)
443 return __d_entry_type(dentry) == DCACHE_SYMLINK_TYPE; 456 return __d_entry_type(dentry) == DCACHE_SYMLINK_TYPE;
444} 457}
445 458
459static inline bool d_is_reg(const struct dentry *dentry)
460{
461 return __d_entry_type(dentry) == DCACHE_REGULAR_TYPE;
462}
463
464static inline bool d_is_special(const struct dentry *dentry)
465{
466 return __d_entry_type(dentry) == DCACHE_SPECIAL_TYPE;
467}
468
446static inline bool d_is_file(const struct dentry *dentry) 469static inline bool d_is_file(const struct dentry *dentry)
447{ 470{
448 return __d_entry_type(dentry) == DCACHE_FILE_TYPE; 471 return d_is_reg(dentry) || d_is_special(dentry);
449} 472}
450 473
451static inline bool d_is_negative(const struct dentry *dentry) 474static inline bool d_is_negative(const struct dentry *dentry)
452{ 475{
453 return __d_entry_type(dentry) == DCACHE_MISS_TYPE; 476 // TODO: check d_is_whiteout(dentry) also.
477 return d_is_miss(dentry);
454} 478}
455 479
456static inline bool d_is_positive(const struct dentry *dentry) 480static inline bool d_is_positive(const struct dentry *dentry)
@@ -458,10 +482,75 @@ static inline bool d_is_positive(const struct dentry *dentry)
458 return !d_is_negative(dentry); 482 return !d_is_negative(dentry);
459} 483}
460 484
485extern void d_set_fallthru(struct dentry *dentry);
486
487static inline bool d_is_fallthru(const struct dentry *dentry)
488{
489 return dentry->d_flags & DCACHE_FALLTHRU;
490}
491
492
461extern int sysctl_vfs_cache_pressure; 493extern int sysctl_vfs_cache_pressure;
462 494
463static inline unsigned long vfs_pressure_ratio(unsigned long val) 495static inline unsigned long vfs_pressure_ratio(unsigned long val)
464{ 496{
465 return mult_frac(val, sysctl_vfs_cache_pressure, 100); 497 return mult_frac(val, sysctl_vfs_cache_pressure, 100);
466} 498}
499
500/**
501 * d_inode - Get the actual inode of this dentry
502 * @dentry: The dentry to query
503 *
504 * This is the helper normal filesystems should use to get at their own inodes
505 * in their own dentries and ignore the layering superimposed upon them.
506 */
507static inline struct inode *d_inode(const struct dentry *dentry)
508{
509 return dentry->d_inode;
510}
511
512/**
513 * d_inode_rcu - Get the actual inode of this dentry with ACCESS_ONCE()
514 * @dentry: The dentry to query
515 *
516 * This is the helper normal filesystems should use to get at their own inodes
517 * in their own dentries and ignore the layering superimposed upon them.
518 */
519static inline struct inode *d_inode_rcu(const struct dentry *dentry)
520{
521 return ACCESS_ONCE(dentry->d_inode);
522}
523
524/**
525 * d_backing_inode - Get upper or lower inode we should be using
526 * @upper: The upper layer
527 *
528 * This is the helper that should be used to get at the inode that will be used
529 * if this dentry were to be opened as a file. The inode may be on the upper
530 * dentry or it may be on a lower dentry pinned by the upper.
531 *
532 * Normal filesystems should not use this to access their own inodes.
533 */
534static inline struct inode *d_backing_inode(const struct dentry *upper)
535{
536 struct inode *inode = upper->d_inode;
537
538 return inode;
539}
540
541/**
542 * d_backing_dentry - Get upper or lower dentry we should be using
543 * @upper: The upper layer
544 *
545 * This is the helper that should be used to get the dentry of the inode that
546 * will be used if this dentry were opened as a file. It may be the upper
547 * dentry or it may be a lower dentry pinned by the upper.
548 *
549 * Normal filesystems should not use this to access their own dentries.
550 */
551static inline struct dentry *d_backing_dentry(struct dentry *upper)
552{
553 return upper;
554}
555
467#endif /* __LINUX_DCACHE_H */ 556#endif /* __LINUX_DCACHE_H */
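
The practical rule encoded by the new helpers: a filesystem uses d_inode() on its own dentries, while layered/VFS code that may sit above an overlay uses d_backing_inode(). A short sketch (foo_* is hypothetical):

#include <linux/dcache.h>
#include <linux/fs.h>

/* Inside a filesystem's own code: its dentries are its own. */
static bool foo_dentry_is_dir(const struct dentry *dentry)
{
        return d_is_positive(dentry) && S_ISDIR(d_inode(dentry)->i_mode);
}

/* In layered code that might sit above an overlay. */
static loff_t foo_size_for_open(const struct dentry *dentry)
{
        return i_size_read(d_backing_inode(dentry));
}
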
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 40cd75e21ea2..b6997a0cb528 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -189,25 +189,6 @@ enum dma_ctrl_flags {
189}; 189};
190 190
191/** 191/**
192 * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
193 * on a running channel.
194 * @DMA_TERMINATE_ALL: terminate all ongoing transfers
195 * @DMA_PAUSE: pause ongoing transfers
196 * @DMA_RESUME: resume paused transfer
197 * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers
198 * that need to runtime reconfigure the slave channels (as opposed to passing
199 * configuration data in statically from the platform). An additional
200 * argument of struct dma_slave_config must be passed in with this
201 * command.
202 */
203enum dma_ctrl_cmd {
204 DMA_TERMINATE_ALL,
205 DMA_PAUSE,
206 DMA_RESUME,
207 DMA_SLAVE_CONFIG,
208};
209
210/**
211 * enum sum_check_bits - bit position of pq_check_flags 192 * enum sum_check_bits - bit position of pq_check_flags
212 */ 193 */
213enum sum_check_bits { 194enum sum_check_bits {
@@ -298,6 +279,9 @@ enum dma_slave_buswidth {
298 DMA_SLAVE_BUSWIDTH_3_BYTES = 3, 279 DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
299 DMA_SLAVE_BUSWIDTH_4_BYTES = 4, 280 DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
300 DMA_SLAVE_BUSWIDTH_8_BYTES = 8, 281 DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
282 DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
283 DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
284 DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
301}; 285};
302 286
303/** 287/**
@@ -336,9 +320,8 @@ enum dma_slave_buswidth {
336 * This struct is passed in as configuration data to a DMA engine 320 * This struct is passed in as configuration data to a DMA engine
337 * in order to set up a certain channel for DMA transport at runtime. 321 * in order to set up a certain channel for DMA transport at runtime.
338 * The DMA device/engine has to provide support for an additional 322 * The DMA device/engine has to provide support for an additional
339 * command in the channel config interface, DMA_SLAVE_CONFIG 323 * callback in the dma_device structure, device_config and this struct
340 * and this struct will then be passed in as an argument to the 324 * will then be passed in as an argument to the function.
341 * DMA engine device_control() function.
342 * 325 *
343 * The rationale for adding configuration information to this struct is as 326 * The rationale for adding configuration information to this struct is as
344 * follows: if it is likely that more than one DMA slave controllers in 327 * follows: if it is likely that more than one DMA slave controllers in
@@ -387,7 +370,7 @@ enum dma_residue_granularity {
387/* struct dma_slave_caps - expose capabilities of a slave channel only 370/* struct dma_slave_caps - expose capabilities of a slave channel only
388 * 371 *
389 * @src_addr_widths: bit mask of src addr widths the channel supports 372 * @src_addr_widths: bit mask of src addr widths the channel supports
390 * @dstn_addr_widths: bit mask of dstn addr widths the channel supports 373 * @dst_addr_widths: bit mask of dst addr widths the channel supports
391 * @directions: bit mask of slave direction the channel supported 374 * @directions: bit mask of slave direction the channel supported
392 * since the enum dma_transfer_direction is not defined as bits for each 375 * since the enum dma_transfer_direction is not defined as bits for each
393 * type of direction, the dma controller should fill (1 << <TYPE>) and same 376 * type of direction, the dma controller should fill (1 << <TYPE>) and same
@@ -398,7 +381,7 @@ enum dma_residue_granularity {
398 */ 381 */
399struct dma_slave_caps { 382struct dma_slave_caps {
400 u32 src_addr_widths; 383 u32 src_addr_widths;
401 u32 dstn_addr_widths; 384 u32 dst_addr_widths;
402 u32 directions; 385 u32 directions;
403 bool cmd_pause; 386 bool cmd_pause;
404 bool cmd_terminate; 387 bool cmd_terminate;
@@ -594,6 +577,14 @@ struct dma_tx_state {
594 * @fill_align: alignment shift for memset operations 577 * @fill_align: alignment shift for memset operations
595 * @dev_id: unique device ID 578 * @dev_id: unique device ID
596 * @dev: struct device reference for dma mapping api 579 * @dev: struct device reference for dma mapping api
580 * @src_addr_widths: bit mask of src addr widths the device supports
581 * @dst_addr_widths: bit mask of dst addr widths the device supports
582 * @directions: bit mask of slave direction the device supports since
583 * the enum dma_transfer_direction is not defined as bits for
584 * each type of direction, the dma controller should fill (1 <<
585 * <TYPE>) and same should be checked by controller as well
586 * @residue_granularity: granularity of the transfer residue reported
587 * by tx_status
597 * @device_alloc_chan_resources: allocate resources and return the 588 * @device_alloc_chan_resources: allocate resources and return the
598 * number of allocated descriptors 589 * number of allocated descriptors
599 * @device_free_chan_resources: release DMA channel's resources 590 * @device_free_chan_resources: release DMA channel's resources
@@ -608,14 +599,19 @@ struct dma_tx_state {
608 * The function takes a buffer of size buf_len. The callback function will 599 * The function takes a buffer of size buf_len. The callback function will
609 * be called after period_len bytes have been transferred. 600 * be called after period_len bytes have been transferred.
610 * @device_prep_interleaved_dma: Transfer expression in a generic way. 601 * @device_prep_interleaved_dma: Transfer expression in a generic way.
611 * @device_control: manipulate all pending operations on a channel, returns 602 * @device_config: Pushes a new configuration to a channel, return 0 or an error
612 * zero or error code 603 * code
604 * @device_pause: Pauses any transfer happening on a channel. Returns
605 * 0 or an error code
606 * @device_resume: Resumes any transfer on a channel previously
607 * paused. Returns 0 or an error code
608 * @device_terminate_all: Aborts all transfers on a channel. Returns 0
609 * or an error code
613 * @device_tx_status: poll for transaction completion, the optional 610 * @device_tx_status: poll for transaction completion, the optional
614 * txstate parameter can be supplied with a pointer to get a 611 * txstate parameter can be supplied with a pointer to get a
615 * struct with auxiliary transfer status information, otherwise the call 612 * struct with auxiliary transfer status information, otherwise the call
616 * will just return a simple status code 613 * will just return a simple status code
617 * @device_issue_pending: push pending transactions to hardware 614 * @device_issue_pending: push pending transactions to hardware
618 * @device_slave_caps: return the slave channel capabilities
619 */ 615 */
620struct dma_device { 616struct dma_device {
621 617
@@ -635,14 +631,19 @@ struct dma_device {
635 int dev_id; 631 int dev_id;
636 struct device *dev; 632 struct device *dev;
637 633
634 u32 src_addr_widths;
635 u32 dst_addr_widths;
636 u32 directions;
637 enum dma_residue_granularity residue_granularity;
638
638 int (*device_alloc_chan_resources)(struct dma_chan *chan); 639 int (*device_alloc_chan_resources)(struct dma_chan *chan);
639 void (*device_free_chan_resources)(struct dma_chan *chan); 640 void (*device_free_chan_resources)(struct dma_chan *chan);
640 641
641 struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( 642 struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
642 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 643 struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
643 size_t len, unsigned long flags); 644 size_t len, unsigned long flags);
644 struct dma_async_tx_descriptor *(*device_prep_dma_xor)( 645 struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
645 struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, 646 struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
646 unsigned int src_cnt, size_t len, unsigned long flags); 647 unsigned int src_cnt, size_t len, unsigned long flags);
647 struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)( 648 struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
648 struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, 649 struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
@@ -674,31 +675,26 @@ struct dma_device {
674 struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( 675 struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
675 struct dma_chan *chan, struct dma_interleaved_template *xt, 676 struct dma_chan *chan, struct dma_interleaved_template *xt,
676 unsigned long flags); 677 unsigned long flags);
677 int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 678
678 unsigned long arg); 679 int (*device_config)(struct dma_chan *chan,
680 struct dma_slave_config *config);
681 int (*device_pause)(struct dma_chan *chan);
682 int (*device_resume)(struct dma_chan *chan);
683 int (*device_terminate_all)(struct dma_chan *chan);
679 684
680 enum dma_status (*device_tx_status)(struct dma_chan *chan, 685 enum dma_status (*device_tx_status)(struct dma_chan *chan,
681 dma_cookie_t cookie, 686 dma_cookie_t cookie,
682 struct dma_tx_state *txstate); 687 struct dma_tx_state *txstate);
683 void (*device_issue_pending)(struct dma_chan *chan); 688 void (*device_issue_pending)(struct dma_chan *chan);
684 int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
685}; 689};
686 690
687static inline int dmaengine_device_control(struct dma_chan *chan,
688 enum dma_ctrl_cmd cmd,
689 unsigned long arg)
690{
691 if (chan->device->device_control)
692 return chan->device->device_control(chan, cmd, arg);
693
694 return -ENOSYS;
695}
696
697static inline int dmaengine_slave_config(struct dma_chan *chan, 691static inline int dmaengine_slave_config(struct dma_chan *chan,
698 struct dma_slave_config *config) 692 struct dma_slave_config *config)
699{ 693{
700 return dmaengine_device_control(chan, DMA_SLAVE_CONFIG, 694 if (chan->device->device_config)
701 (unsigned long)config); 695 return chan->device->device_config(chan, config);
696
697 return -ENOSYS;
702} 698}
703 699
704static inline bool is_slave_direction(enum dma_transfer_direction direction) 700static inline bool is_slave_direction(enum dma_transfer_direction direction)
@@ -765,34 +761,28 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
765 src_sg, src_nents, flags); 761 src_sg, src_nents, flags);
766} 762}
767 763
768static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
769{
770 if (!chan || !caps)
771 return -EINVAL;
772
773 /* check if the channel supports slave transactions */
774 if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits))
775 return -ENXIO;
776
777 if (chan->device->device_slave_caps)
778 return chan->device->device_slave_caps(chan, caps);
779
780 return -ENXIO;
781}
782
783static inline int dmaengine_terminate_all(struct dma_chan *chan) 764static inline int dmaengine_terminate_all(struct dma_chan *chan)
784{ 765{
785 return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); 766 if (chan->device->device_terminate_all)
767 return chan->device->device_terminate_all(chan);
768
769 return -ENOSYS;
786} 770}
787 771
788static inline int dmaengine_pause(struct dma_chan *chan) 772static inline int dmaengine_pause(struct dma_chan *chan)
789{ 773{
790 return dmaengine_device_control(chan, DMA_PAUSE, 0); 774 if (chan->device->device_pause)
775 return chan->device->device_pause(chan);
776
777 return -ENOSYS;
791} 778}
792 779
793static inline int dmaengine_resume(struct dma_chan *chan) 780static inline int dmaengine_resume(struct dma_chan *chan)
794{ 781{
795 return dmaengine_device_control(chan, DMA_RESUME, 0); 782 if (chan->device->device_resume)
783 return chan->device->device_resume(chan);
784
785 return -ENOSYS;
796} 786}
797 787
798static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan, 788static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
@@ -1059,6 +1049,7 @@ struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
1059 const char *name); 1049 const char *name);
1060struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); 1050struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
1061void dma_release_channel(struct dma_chan *chan); 1051void dma_release_channel(struct dma_chan *chan);
1052int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
1062#else 1053#else
1063static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) 1054static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
1064{ 1055{
@@ -1093,6 +1084,11 @@ static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
1093static inline void dma_release_channel(struct dma_chan *chan) 1084static inline void dma_release_channel(struct dma_chan *chan)
1094{ 1085{
1095} 1086}
1087static inline int dma_get_slave_caps(struct dma_chan *chan,
1088 struct dma_slave_caps *caps)
1089{
1090 return -ENXIO;
1091}
1096#endif 1092#endif
1097 1093
1098/* --- DMA device --- */ 1094/* --- DMA device --- */
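
For a driver, the device_control() removal means replacing one multiplexed callback with individual hooks, and reporting capabilities directly in struct dma_device instead of via device_slave_caps(). A hedged sketch with a hypothetical driver:

#include <linux/dmaengine.h>

static int foo_dma_config(struct dma_chan *chan,
                          struct dma_slave_config *cfg)
{
        /* previously: the DMA_SLAVE_CONFIG branch of device_control() */
        return 0;
}

static int foo_dma_terminate_all(struct dma_chan *chan)
{
        /* previously: the DMA_TERMINATE_ALL branch */
        return 0;
}

static void foo_dma_probe_setup(struct dma_device *dd)
{
        dd->device_config = foo_dma_config;
        dd->device_terminate_all = foo_dma_terminate_all;
        /* Capabilities now live on the device itself. */
        dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dd->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
        dd->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
}
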
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 447932aed1e1..b4d71b5e1ff2 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -968,9 +968,6 @@ struct file_lock_context {
968 struct list_head flc_flock; 968 struct list_head flc_flock;
969 struct list_head flc_posix; 969 struct list_head flc_posix;
970 struct list_head flc_lease; 970 struct list_head flc_lease;
971 int flc_flock_cnt;
972 int flc_posix_cnt;
973 int flc_lease_cnt;
974}; 971};
975 972
976/* The following constant reflects the upper bound of the file/locking space */ 973/* The following constant reflects the upper bound of the file/locking space */
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 51f7ccadf923..4173a8fdad9e 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -33,6 +33,8 @@
33 * @units: Measurement unit for this attribute. 33 * @units: Measurement unit for this attribute.
34 * @unit_expo: Exponent used in the data. 34 * @unit_expo: Exponent used in the data.
35 * @size: Size in bytes for data size. 35 * @size: Size in bytes for data size.
36 * @logical_minimum: Logical minimum value for this attribute.
37 * @logical_maximum: Logical maximum value for this attribute.
36 */ 38 */
37struct hid_sensor_hub_attribute_info { 39struct hid_sensor_hub_attribute_info {
38 u32 usage_id; 40 u32 usage_id;
@@ -146,6 +148,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
146 148
147/** 149/**
148* sensor_hub_input_attr_get_raw_value() - Synchronous read request 150* sensor_hub_input_attr_get_raw_value() - Synchronous read request
151* @hsdev: Hub device instance.
149* @usage_id: Attribute usage id of parent physical device as per spec 152* @usage_id: Attribute usage id of parent physical device as per spec
150* @attr_usage_id: Attribute usage id as per spec 153* @attr_usage_id: Attribute usage id as per spec
151* @report_id: Report id to look for 154* @report_id: Report id to look for
@@ -160,6 +163,7 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
160 u32 attr_usage_id, u32 report_id); 163 u32 attr_usage_id, u32 report_id);
161/** 164/**
162* sensor_hub_set_feature() - Feature set request 165* sensor_hub_set_feature() - Feature set request
166* @hsdev: Hub device instance.
163* @report_id: Report id to look for 167* @report_id: Report id to look for
164* @field_index: Field index inside a report 168* @field_index: Field index inside a report
165* @value: Value to set 169* @value: Value to set
@@ -172,6 +176,7 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
172 176
173/** 177/**
174* sensor_hub_get_feature() - Feature get request 178* sensor_hub_get_feature() - Feature get request
179* @hsdev: Hub device instance.
175* @report_id: Report id to look for 180* @report_id: Report id to look for
176* @field_index: Field index inside a report 181* @field_index: Field index inside a report
177* @value: Place holder for return value 182* @value: Place holder for return value
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 7c7695940ddd..f17da50402a4 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -130,8 +130,6 @@ extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
130 * @probe: Callback for device binding 130 * @probe: Callback for device binding
131 * @remove: Callback for device unbinding 131 * @remove: Callback for device unbinding
132 * @shutdown: Callback for device shutdown 132 * @shutdown: Callback for device shutdown
133 * @suspend: Callback for device suspend
134 * @resume: Callback for device resume
135 * @alert: Alert callback, for example for the SMBus alert protocol 133 * @alert: Alert callback, for example for the SMBus alert protocol
136 * @command: Callback for bus-wide signaling (optional) 134 * @command: Callback for bus-wide signaling (optional)
137 * @driver: Device driver model driver 135 * @driver: Device driver model driver
@@ -174,8 +172,6 @@ struct i2c_driver {
174 172
175 /* driver model interfaces that don't relate to enumeration */ 173 /* driver model interfaces that don't relate to enumeration */
176 void (*shutdown)(struct i2c_client *); 174 void (*shutdown)(struct i2c_client *);
177 int (*suspend)(struct i2c_client *, pm_message_t mesg);
178 int (*resume)(struct i2c_client *);
179 175
180 /* Alert callback, for example for the SMBus alert protocol. 176 /* Alert callback, for example for the SMBus alert protocol.
181 * The format and meaning of the data value depends on the protocol. 177 * The format and meaning of the data value depends on the protocol.
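
The dropped i2c-specific suspend/resume hooks were superseded by generic dev_pm_ops on the underlying struct device. A sketch of the replacement pattern (names hypothetical):

#include <linux/i2c.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
        struct i2c_client *client = to_i2c_client(dev);

        dev_dbg(&client->dev, "suspending\n");
        /* quiesce the chip via client ... */
        return 0;
}

static int foo_resume(struct device *dev)
{
        return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct i2c_driver foo_driver = {
        .driver = {
                .name = "foo",
                .pm   = &foo_pm_ops,
        },
        /* .probe/.remove elided */
};
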
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 420f77b34d02..e6a6aac451db 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -243,7 +243,6 @@ extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
243extern void gic_send_ipi(unsigned int intr); 243extern void gic_send_ipi(unsigned int intr);
244extern unsigned int plat_ipi_call_int_xlate(unsigned int); 244extern unsigned int plat_ipi_call_int_xlate(unsigned int);
245extern unsigned int plat_ipi_resched_int_xlate(unsigned int); 245extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
246extern unsigned int gic_get_timer_pending(void);
247extern int gic_get_c0_compare_int(void); 246extern int gic_get_c0_compare_int(void);
248extern int gic_get_c0_perfcount_int(void); 247extern int gic_get_c0_perfcount_int(void);
249#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */ 248#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 75ae2e2631fc..a19bcf9e762e 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -156,8 +156,14 @@ typedef enum {
 	KDB_REASON_SYSTEM_NMI,	/* In NMI due to SYSTEM cmd; regs valid */
 } kdb_reason_t;
 
+enum kdb_msgsrc {
+	KDB_MSGSRC_INTERNAL, /* direct call to kdb_printf() */
+	KDB_MSGSRC_PRINTK, /* trapped from printk() */
+};
+
 extern int kdb_trap_printk;
-extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args);
+extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt,
+				      va_list args);
 extern __printf(1, 2) int kdb_printf(const char *, ...);
 typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
 
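
The extra enum kdb_msgsrc argument lets vkdb_printf() distinguish text trapped from printk() from direct kdb output. A hedged sketch of a caller under the new signature (the kdb_log() wrapper itself is hypothetical):

#include <linux/kdb.h>

static __printf(1, 2) int kdb_log(const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, args);
	va_end(args);
	return r;
}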
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h
index 495203ff221c..acd5b12565cc 100644
--- a/include/linux/lguest_launcher.h
+++ b/include/linux/lguest_launcher.h
@@ -8,52 +8,13 @@
  *
  * The Guest needs devices to do anything useful. Since we don't let it touch
  * real devices (think of the damage it could do!) we provide virtual devices.
- * We could emulate a PCI bus with various devices on it, but that is a fairly
- * complex burden for the Host and suboptimal for the Guest, so we have our own
- * simple lguest bus and we use "virtio" drivers. These drivers need a set of
- * routines from us which will actually do the virtual I/O, but they handle all
- * the net/block/console stuff themselves. This means that if we want to add
- * a new device, we simply need to write a new virtio driver and create support
- * for it in the Launcher: this code won't need to change.
+ * We emulate a PCI bus with virtio devices on it; we used to have our own
+ * lguest bus which was far simpler, but this tests the virtio 1.0 standard.
  *
  * Virtio devices are also used by kvm, so we can simply reuse their optimized
  * device drivers. And one day when everyone uses virtio, my plan will be
  * complete. Bwahahahah!
- *
- * Devices are described by a simplified ID, a status byte, and some "config"
- * bytes which describe this device's configuration. This is placed by the
- * Launcher just above the top of physical memory:
- */
-struct lguest_device_desc {
-	/* The device type: console, network, disk etc. Type 0 terminates. */
-	__u8 type;
-	/* The number of virtqueues (first in config array) */
-	__u8 num_vq;
-	/*
-	 * The number of bytes of feature bits. Multiply by 2: one for host
-	 * features and one for Guest acknowledgements.
-	 */
-	__u8 feature_len;
-	/* The number of bytes of the config array after virtqueues. */
-	__u8 config_len;
-	/* A status byte, written by the Guest. */
-	__u8 status;
-	__u8 config[0];
-};
-
-/*D:135
- * This is how we expect the device configuration field for a virtqueue
- * to be laid out in config space.
- */
  */
-struct lguest_vqconfig {
-	/* The number of entries in the virtio_ring */
-	__u16 num;
-	/* The interrupt we get when something happens. */
-	__u16 irq;
-	/* The page number of the virtio ring for this device. */
-	__u32 pfn;
-};
-/*:*/
 
 /* Write command first word is a request. */
 enum lguest_req
@@ -62,12 +23,22 @@ enum lguest_req
 	LHREQ_GETDMA, /* No longer used */
 	LHREQ_IRQ, /* + irq */
 	LHREQ_BREAK, /* No longer used */
-	LHREQ_EVENTFD, /* + address, fd. */
+	LHREQ_EVENTFD, /* No longer used. */
+	LHREQ_GETREG, /* + offset within struct pt_regs (then read value). */
+	LHREQ_SETREG, /* + offset within struct pt_regs, value. */
+	LHREQ_TRAP, /* + trap number to deliver to guest. */
 };
 
 /*
- * The alignment to use between consumer and producer parts of vring.
- * x86 pagesize for historical reasons.
+ * This is what read() of the lguest fd populates. trap ==
+ * LGUEST_TRAP_ENTRY for an LHCALL_NOTIFY (addr is the
+ * argument), 14 for a page fault in the MMIO region (addr is
+ * the trap address, insn is the instruction), or 13 for a GPF
+ * (insn is the instruction).
  */
-#define LGUEST_VRING_ALIGN	4096
+struct lguest_pending {
+	__u8 trap;
+	__u8 insn[7];
+	__u32 addr;
+};
 #endif /* _LINUX_LGUEST_LAUNCHER */
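
For context, a hypothetical Launcher-side loop showing how the new read() interface is consumed; the trap numbers follow the comment above:

#include <linux/lguest_launcher.h>
#include <unistd.h>

static void service_pending(int lguest_fd)
{
	struct lguest_pending pending;

	while (read(lguest_fd, &pending, sizeof(pending)) == sizeof(pending)) {
		switch (pending.trap) {
		case 13:	/* GPF: emulate pending.insn[] */
			break;
		case 14:	/* MMIO fault at pending.addr, insn in pending.insn[] */
			break;
		default:	/* LGUEST_TRAP_ENTRY: LHCALL_NOTIFY on pending.addr */
			break;
		}
	}
}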
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 81589d176ae8..dfabd6db7ddf 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -124,10 +124,27 @@ enum {
 #define AXP288_PMIC_ADC_H               0x56
 #define AXP288_PMIC_ADC_L               0x57
 #define AXP288_ADC_TS_PIN_CTRL          0x84
-
 #define AXP288_PMIC_ADC_EN              0x84
-#define AXP288_FG_TUNE5                 0xed
 
+/* Fuel Gauge */
+#define AXP288_FG_RDC1_REG              0xba
+#define AXP288_FG_RDC0_REG              0xbb
+#define AXP288_FG_OCVH_REG              0xbc
+#define AXP288_FG_OCVL_REG              0xbd
+#define AXP288_FG_OCV_CURVE_REG         0xc0
+#define AXP288_FG_DES_CAP1_REG          0xe0
+#define AXP288_FG_DES_CAP0_REG          0xe1
+#define AXP288_FG_CC_MTR1_REG           0xe2
+#define AXP288_FG_CC_MTR0_REG           0xe3
+#define AXP288_FG_OCV_CAP_REG           0xe4
+#define AXP288_FG_CC_CAP_REG            0xe5
+#define AXP288_FG_LOW_CAP_REG           0xe6
+#define AXP288_FG_TUNE0                 0xe8
+#define AXP288_FG_TUNE1                 0xe9
+#define AXP288_FG_TUNE2                 0xea
+#define AXP288_FG_TUNE3                 0xeb
+#define AXP288_FG_TUNE4                 0xec
+#define AXP288_FG_TUNE5                 0xed
 
 /* Regulators IDs */
 enum {
@@ -236,4 +253,26 @@ struct axp20x_dev {
 	const struct regmap_irq_chip *regmap_irq_chip;
 };
 
+#define BATTID_LEN              64
+#define OCV_CURVE_SIZE          32
+#define MAX_THERM_CURVE_SIZE    25
+#define PD_DEF_MIN_TEMP         0
+#define PD_DEF_MAX_TEMP         55
+
+struct axp20x_fg_pdata {
+	char battid[BATTID_LEN + 1];
+	int design_cap;
+	int min_volt;
+	int max_volt;
+	int max_temp;
+	int min_temp;
+	int cap1;
+	int cap0;
+	int rdc1;
+	int rdc0;
+	int ocv_curve[OCV_CURVE_SIZE];
+	int tcsz;
+	int thermistor_curve[MAX_THERM_CURVE_SIZE][2];
+};
+
 #endif /* __LINUX_MFD_AXP20X_H */
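
A hedged sketch of combining the two coulomb-counter bytes through the MFD regmap; that AXP288_FG_CC_MTR1_REG holds the high byte is an assumption, not something the header states:

static int axp288_fg_read_cc_example(struct axp20x_dev *axp, unsigned int *mtr)
{
	unsigned int hi, lo;
	int ret;

	ret = regmap_read(axp->regmap, AXP288_FG_CC_MTR1_REG, &hi);
	if (ret)
		return ret;
	ret = regmap_read(axp->regmap, AXP288_FG_CC_MTR0_REG, &lo);
	if (ret)
		return ret;

	*mtr = (hi << 8) | lo;	/* high/low byte order is assumed */
	return 0;
}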
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index b92a3262f8f6..79f4d822ba13 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -36,6 +36,7 @@ enum da9063_models {
 enum da9063_variant_codes {
 	PMIC_DA9063_AD = 0x3,
 	PMIC_DA9063_BB = 0x5,
+	PMIC_DA9063_CA = 0x6,
 };
 
 /* Interrupts */
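
A hypothetical probe-time check showing where the new variant code slots in beside the existing silicon revisions:

static int da9063_check_variant(enum da9063_variant_codes variant_code)
{
	switch (variant_code) {
	case PMIC_DA9063_AD:
	case PMIC_DA9063_BB:
	case PMIC_DA9063_CA:
		return 0;	/* known silicon revision */
	default:
		return -ENODEV;
	}
}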
diff --git a/include/linux/mfd/da9150/core.h b/include/linux/mfd/da9150/core.h
new file mode 100644
index 000000000000..76e668933a77
--- /dev/null
+++ b/include/linux/mfd/da9150/core.h
@@ -0,0 +1,68 @@
1/*
2 * DA9150 MFD Driver - Core Data
3 *
4 * Copyright (c) 2014 Dialog Semiconductor
5 *
6 * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#ifndef __DA9150_CORE_H
15#define __DA9150_CORE_H
16
17#include <linux/device.h>
18#include <linux/interrupt.h>
19#include <linux/regmap.h>
20
21/* I2C address paging */
22#define DA9150_REG_PAGE_SHIFT 8
23#define DA9150_REG_PAGE_MASK 0xFF
24
25/* IRQs */
26#define DA9150_NUM_IRQ_REGS 4
27#define DA9150_IRQ_VBUS 0
28#define DA9150_IRQ_CHG 1
29#define DA9150_IRQ_TCLASS 2
30#define DA9150_IRQ_TJUNC 3
31#define DA9150_IRQ_VFAULT 4
32#define DA9150_IRQ_CONF 5
33#define DA9150_IRQ_DAT 6
34#define DA9150_IRQ_DTYPE 7
35#define DA9150_IRQ_ID 8
36#define DA9150_IRQ_ADP 9
37#define DA9150_IRQ_SESS_END 10
38#define DA9150_IRQ_SESS_VLD 11
39#define DA9150_IRQ_FG 12
40#define DA9150_IRQ_GP 13
41#define DA9150_IRQ_TBAT 14
42#define DA9150_IRQ_GPIOA 15
43#define DA9150_IRQ_GPIOB 16
44#define DA9150_IRQ_GPIOC 17
45#define DA9150_IRQ_GPIOD 18
46#define DA9150_IRQ_GPADC 19
47#define DA9150_IRQ_WKUP 20
48
49struct da9150_pdata {
50 int irq_base;
51};
52
53struct da9150 {
54 struct device *dev;
55 struct regmap *regmap;
56 struct regmap_irq_chip_data *regmap_irq_data;
57 int irq;
58 int irq_base;
59};
60
61/* Device I/O */
62u8 da9150_reg_read(struct da9150 *da9150, u16 reg);
63void da9150_reg_write(struct da9150 *da9150, u16 reg, u8 val);
64void da9150_set_bits(struct da9150 *da9150, u16 reg, u8 mask, u8 val);
65
66void da9150_bulk_read(struct da9150 *da9150, u16 reg, int count, u8 *buf);
67void da9150_bulk_write(struct da9150 *da9150, u16 reg, int count, const u8 *buf);
68#endif /* __DA9150_CORE_H */
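
A short sketch of the declared I/O helpers, and of how a 16-bit register address decomposes per the DA9150_REG_PAGE_* constants; the register value 0x150 is arbitrary:

static void da9150_io_example(struct da9150 *da9150)
{
	u16 reg = 0x150;	/* arbitrary paged address: page 1, offset 0x50 */
	u8 page = reg >> DA9150_REG_PAGE_SHIFT;		/* I2C page select = 1 */
	u8 offset = reg & DA9150_REG_PAGE_MASK;		/* in-page address = 0x50 */
	u8 val;

	/* the helpers take the full 16-bit address and page internally */
	val = da9150_reg_read(da9150, reg);
	da9150_reg_write(da9150, reg, val | 0x01);
	(void)page;
	(void)offset;
}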
diff --git a/include/linux/mfd/da9150/registers.h b/include/linux/mfd/da9150/registers.h
new file mode 100644
index 000000000000..27ca6ee4d840
--- /dev/null
+++ b/include/linux/mfd/da9150/registers.h
@@ -0,0 +1,1155 @@
1/*
2 * DA9150 MFD Driver - Registers
3 *
4 * Copyright (c) 2014 Dialog Semiconductor
5 *
6 * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#ifndef __DA9150_REGISTERS_H
15#define __DA9150_REGISTERS_H
16
17#include <linux/bitops.h>
18
19/* Registers */
20#define DA9150_PAGE_CON 0x000
21#define DA9150_STATUS_A 0x068
22#define DA9150_STATUS_B 0x069
23#define DA9150_STATUS_C 0x06A
24#define DA9150_STATUS_D 0x06B
25#define DA9150_STATUS_E 0x06C
26#define DA9150_STATUS_F 0x06D
27#define DA9150_STATUS_G 0x06E
28#define DA9150_STATUS_H 0x06F
29#define DA9150_STATUS_I 0x070
30#define DA9150_STATUS_J 0x071
31#define DA9150_STATUS_K 0x072
32#define DA9150_STATUS_L 0x073
33#define DA9150_STATUS_N 0x074
34#define DA9150_FAULT_LOG_A 0x076
35#define DA9150_FAULT_LOG_B 0x077
36#define DA9150_EVENT_E 0x078
37#define DA9150_EVENT_F 0x079
38#define DA9150_EVENT_G 0x07A
39#define DA9150_EVENT_H 0x07B
40#define DA9150_IRQ_MASK_E 0x07C
41#define DA9150_IRQ_MASK_F 0x07D
42#define DA9150_IRQ_MASK_G 0x07E
43#define DA9150_IRQ_MASK_H 0x07F
44#define DA9150_PAGE_CON_1 0x080
45#define DA9150_CONFIG_A 0x0E0
46#define DA9150_CONFIG_B 0x0E1
47#define DA9150_CONFIG_C 0x0E2
48#define DA9150_CONFIG_D 0x0E3
49#define DA9150_CONFIG_E 0x0E4
50#define DA9150_CONTROL_A 0x0E5
51#define DA9150_CONTROL_B 0x0E6
52#define DA9150_CONTROL_C 0x0E7
53#define DA9150_GPIO_A_B 0x0E8
54#define DA9150_GPIO_C_D 0x0E9
55#define DA9150_GPIO_MODE_CONT 0x0EA
56#define DA9150_GPIO_CTRL_B 0x0EB
57#define DA9150_GPIO_CTRL_A 0x0EC
58#define DA9150_GPIO_CTRL_C 0x0ED
59#define DA9150_GPIO_CFG_A 0x0EE
60#define DA9150_GPIO_CFG_B 0x0EF
61#define DA9150_GPIO_CFG_C 0x0F0
62#define DA9150_GPADC_MAN 0x0F2
63#define DA9150_GPADC_RES_A 0x0F4
64#define DA9150_GPADC_RES_B 0x0F5
65#define DA9150_PAGE_CON_2 0x100
66#define DA9150_OTP_CONT_SHARED 0x101
67#define DA9150_INTERFACE_SHARED 0x105
68#define DA9150_CONFIG_A_SHARED 0x106
69#define DA9150_CONFIG_D_SHARED 0x109
70#define DA9150_ADETVB_CFG_C 0x150
71#define DA9150_ADETD_STAT 0x151
72#define DA9150_ADET_CMPSTAT 0x152
73#define DA9150_ADET_CTRL_A 0x153
74#define DA9150_ADETVB_CFG_B 0x154
75#define DA9150_ADETVB_CFG_A 0x155
76#define DA9150_ADETAC_CFG_A 0x156
77#define DA9150_ADDETAC_CFG_B 0x157
78#define DA9150_ADETAC_CFG_C 0x158
79#define DA9150_ADETAC_CFG_D 0x159
80#define DA9150_ADETVB_CFG_D 0x15A
81#define DA9150_ADETID_CFG_A 0x15B
82#define DA9150_ADET_RID_PT_CHG_H 0x15C
83#define DA9150_ADET_RID_PT_CHG_L 0x15D
84#define DA9150_PPR_TCTR_B 0x160
85#define DA9150_PPR_BKCTRL_A 0x163
86#define DA9150_PPR_BKCFG_A 0x164
87#define DA9150_PPR_BKCFG_B 0x165
88#define DA9150_PPR_CHGCTRL_A 0x166
89#define DA9150_PPR_CHGCTRL_B 0x167
90#define DA9150_PPR_CHGCTRL_C 0x168
91#define DA9150_PPR_TCTR_A 0x169
92#define DA9150_PPR_CHGCTRL_D 0x16A
93#define DA9150_PPR_CHGCTRL_E 0x16B
94#define DA9150_PPR_CHGCTRL_F 0x16C
95#define DA9150_PPR_CHGCTRL_G 0x16D
96#define DA9150_PPR_CHGCTRL_H 0x16E
97#define DA9150_PPR_CHGCTRL_I 0x16F
98#define DA9150_PPR_CHGCTRL_J 0x170
99#define DA9150_PPR_CHGCTRL_K 0x171
100#define DA9150_PPR_CHGCTRL_L 0x172
101#define DA9150_PPR_CHGCTRL_M 0x173
102#define DA9150_PPR_THYST_A 0x174
103#define DA9150_PPR_THYST_B 0x175
104#define DA9150_PPR_THYST_C 0x176
105#define DA9150_PPR_THYST_D 0x177
106#define DA9150_PPR_THYST_E 0x178
107#define DA9150_PPR_THYST_F 0x179
108#define DA9150_PPR_THYST_G 0x17A
109#define DA9150_PAGE_CON_3 0x180
110#define DA9150_PAGE_CON_4 0x200
111#define DA9150_PAGE_CON_5 0x280
112#define DA9150_PAGE_CON_6 0x300
113#define DA9150_COREBTLD_STAT_A 0x302
114#define DA9150_COREBTLD_CTRL_A 0x303
115#define DA9150_CORE_CONFIG_A 0x304
116#define DA9150_CORE_CONFIG_C 0x305
117#define DA9150_CORE_CONFIG_B 0x306
118#define DA9150_CORE_CFG_DATA_A 0x307
119#define DA9150_CORE_CFG_DATA_B 0x308
120#define DA9150_CORE_CMD_A 0x309
121#define DA9150_CORE_DATA_A 0x30A
122#define DA9150_CORE_DATA_B 0x30B
123#define DA9150_CORE_DATA_C 0x30C
124#define DA9150_CORE_DATA_D 0x30D
125#define DA9150_CORE2WIRE_STAT_A 0x310
126#define DA9150_CORE2WIRE_CTRL_A 0x311
127#define DA9150_FW_CTRL_A 0x312
128#define DA9150_FW_CTRL_C 0x313
129#define DA9150_FW_CTRL_D 0x314
130#define DA9150_FG_CTRL_A 0x315
131#define DA9150_FG_CTRL_B 0x316
132#define DA9150_FW_CTRL_E 0x317
133#define DA9150_FW_CTRL_B 0x318
134#define DA9150_GPADC_CMAN 0x320
135#define DA9150_GPADC_CRES_A 0x322
136#define DA9150_GPADC_CRES_B 0x323
137#define DA9150_CC_CFG_A 0x328
138#define DA9150_CC_CFG_B 0x329
139#define DA9150_CC_ICHG_RES_A 0x32A
140#define DA9150_CC_ICHG_RES_B 0x32B
141#define DA9150_CC_IAVG_RES_A 0x32C
142#define DA9150_CC_IAVG_RES_B 0x32D
143#define DA9150_TAUX_CTRL_A 0x330
144#define DA9150_TAUX_RELOAD_H 0x332
145#define DA9150_TAUX_RELOAD_L 0x333
146#define DA9150_TAUX_VALUE_H 0x334
147#define DA9150_TAUX_VALUE_L 0x335
148#define DA9150_AUX_DATA_0 0x338
149#define DA9150_AUX_DATA_1 0x339
150#define DA9150_AUX_DATA_2 0x33A
151#define DA9150_AUX_DATA_3 0x33B
152#define DA9150_BIF_CTRL 0x340
153#define DA9150_TBAT_CTRL_A 0x342
154#define DA9150_TBAT_CTRL_B 0x343
155#define DA9150_TBAT_RES_A 0x344
156#define DA9150_TBAT_RES_B 0x345
157
158/* DA9150_PAGE_CON = 0x000 */
159#define DA9150_PAGE_SHIFT 0
160#define DA9150_PAGE_MASK (0x3f << 0)
161#define DA9150_I2C_PAGE_SHIFT 1
162#define DA9150_I2C_PAGE_MASK (0x1f << 1)
163#define DA9150_WRITE_MODE_SHIFT 6
164#define DA9150_WRITE_MODE_MASK BIT(6)
165#define DA9150_REVERT_SHIFT 7
166#define DA9150_REVERT_MASK BIT(7)
167
168/* DA9150_STATUS_A = 0x068 */
169#define DA9150_WKUP_STAT_SHIFT 2
170#define DA9150_WKUP_STAT_MASK (0x0f << 2)
171#define DA9150_SLEEP_STAT_SHIFT 6
172#define DA9150_SLEEP_STAT_MASK (0x03 << 6)
173
174/* DA9150_STATUS_B = 0x069 */
175#define DA9150_VFAULT_STAT_SHIFT 0
176#define DA9150_VFAULT_STAT_MASK BIT(0)
177#define DA9150_TFAULT_STAT_SHIFT 1
178#define DA9150_TFAULT_STAT_MASK BIT(1)
179
180/* DA9150_STATUS_C = 0x06A */
181#define DA9150_VDD33_STAT_SHIFT 0
182#define DA9150_VDD33_STAT_MASK BIT(0)
183#define DA9150_VDD33_SLEEP_SHIFT 1
184#define DA9150_VDD33_SLEEP_MASK BIT(1)
185#define DA9150_LFOSC_STAT_SHIFT 7
186#define DA9150_LFOSC_STAT_MASK BIT(7)
187
188/* DA9150_STATUS_D = 0x06B */
189#define DA9150_GPIOA_STAT_SHIFT 0
190#define DA9150_GPIOA_STAT_MASK BIT(0)
191#define DA9150_GPIOB_STAT_SHIFT 1
192#define DA9150_GPIOB_STAT_MASK BIT(1)
193#define DA9150_GPIOC_STAT_SHIFT 2
194#define DA9150_GPIOC_STAT_MASK BIT(2)
195#define DA9150_GPIOD_STAT_SHIFT 3
196#define DA9150_GPIOD_STAT_MASK BIT(3)
197
198/* DA9150_STATUS_E = 0x06C */
199#define DA9150_DTYPE_SHIFT 0
200#define DA9150_DTYPE_MASK (0x1f << 0)
201#define DA9150_DTYPE_DT_NIL (0x00 << 0)
202#define DA9150_DTYPE_DT_USB_OTG BIT(0)
203#define DA9150_DTYPE_DT_USB_STD (0x02 << 0)
204#define DA9150_DTYPE_DT_USB_CHG (0x03 << 0)
205#define DA9150_DTYPE_DT_ACA_CHG (0x04 << 0)
206#define DA9150_DTYPE_DT_ACA_OTG (0x05 << 0)
207#define DA9150_DTYPE_DT_ACA_DOC (0x06 << 0)
208#define DA9150_DTYPE_DT_DED_CHG (0x07 << 0)
209#define DA9150_DTYPE_DT_CR5_CHG (0x08 << 0)
210#define DA9150_DTYPE_DT_CR4_CHG (0x0c << 0)
211#define DA9150_DTYPE_DT_PT_CHG (0x11 << 0)
212#define DA9150_DTYPE_DT_NN_ACC (0x16 << 0)
213#define DA9150_DTYPE_DT_NN_CHG (0x17 << 0)
214
215/* DA9150_STATUS_F = 0x06D */
216#define DA9150_SESS_VLD_SHIFT 0
217#define DA9150_SESS_VLD_MASK BIT(0)
218#define DA9150_ID_ERR_SHIFT 1
219#define DA9150_ID_ERR_MASK BIT(1)
220#define DA9150_PT_CHG_SHIFT 2
221#define DA9150_PT_CHG_MASK BIT(2)
222
223/* DA9150_STATUS_G = 0x06E */
224#define DA9150_RID_SHIFT 0
225#define DA9150_RID_MASK (0xff << 0)
226
227/* DA9150_STATUS_H = 0x06F */
228#define DA9150_VBUS_STAT_SHIFT 0
229#define DA9150_VBUS_STAT_MASK (0x07 << 0)
230#define DA9150_VBUS_STAT_OFF (0x00 << 0)
231#define DA9150_VBUS_STAT_WAIT BIT(0)
232#define DA9150_VBUS_STAT_CHG (0x02 << 0)
233#define DA9150_VBUS_TRED_SHIFT 3
234#define DA9150_VBUS_TRED_MASK BIT(3)
235#define DA9150_VBUS_DROP_STAT_SHIFT 4
236#define DA9150_VBUS_DROP_STAT_MASK (0x0f << 4)
237
238/* DA9150_STATUS_I = 0x070 */
239#define DA9150_VBUS_ISET_STAT_SHIFT 0
240#define DA9150_VBUS_ISET_STAT_MASK (0x1f << 0)
241#define DA9150_VBUS_OT_SHIFT 7
242#define DA9150_VBUS_OT_MASK BIT(7)
243
244/* DA9150_STATUS_J = 0x071 */
245#define DA9150_CHG_STAT_SHIFT 0
246#define DA9150_CHG_STAT_MASK (0x0f << 0)
247#define DA9150_CHG_STAT_OFF (0x00 << 0)
248#define DA9150_CHG_STAT_SUSP BIT(0)
249#define DA9150_CHG_STAT_ACT (0x02 << 0)
250#define DA9150_CHG_STAT_PRE (0x03 << 0)
251#define DA9150_CHG_STAT_CC (0x04 << 0)
252#define DA9150_CHG_STAT_CV (0x05 << 0)
253#define DA9150_CHG_STAT_FULL (0x06 << 0)
254#define DA9150_CHG_STAT_TEMP (0x07 << 0)
255#define DA9150_CHG_STAT_TIME (0x08 << 0)
256#define DA9150_CHG_STAT_BAT (0x09 << 0)
257#define DA9150_CHG_TEMP_SHIFT 4
258#define DA9150_CHG_TEMP_MASK (0x07 << 4)
259#define DA9150_CHG_TEMP_UNDER (0x06 << 4)
260#define DA9150_CHG_TEMP_OVER (0x07 << 4)
261#define DA9150_CHG_IEND_STAT_SHIFT 7
262#define DA9150_CHG_IEND_STAT_MASK BIT(7)
263
264/* DA9150_STATUS_K = 0x072 */
265#define DA9150_CHG_IAV_H_SHIFT 0
266#define DA9150_CHG_IAV_H_MASK (0xff << 0)
267
268/* DA9150_STATUS_L = 0x073 */
269#define DA9150_CHG_IAV_L_SHIFT 5
270#define DA9150_CHG_IAV_L_MASK (0x07 << 5)
271
272/* DA9150_STATUS_N = 0x074 */
273#define DA9150_CHG_TIME_SHIFT 1
274#define DA9150_CHG_TIME_MASK BIT(1)
275#define DA9150_CHG_TRED_SHIFT 2
276#define DA9150_CHG_TRED_MASK BIT(2)
277#define DA9150_CHG_TJUNC_CLASS_SHIFT 3
278#define DA9150_CHG_TJUNC_CLASS_MASK (0x07 << 3)
279#define DA9150_CHG_TJUNC_CLASS_6 (0x06 << 3)
280#define DA9150_EBS_STAT_SHIFT 6
281#define DA9150_EBS_STAT_MASK BIT(6)
282#define DA9150_CHG_BAT_REMOVED_SHIFT 7
283#define DA9150_CHG_BAT_REMOVED_MASK BIT(7)
284
285/* DA9150_FAULT_LOG_A = 0x076 */
286#define DA9150_TEMP_FAULT_SHIFT 0
287#define DA9150_TEMP_FAULT_MASK BIT(0)
288#define DA9150_VSYS_FAULT_SHIFT 1
289#define DA9150_VSYS_FAULT_MASK BIT(1)
290#define DA9150_START_FAULT_SHIFT 2
291#define DA9150_START_FAULT_MASK BIT(2)
292#define DA9150_EXT_FAULT_SHIFT 3
293#define DA9150_EXT_FAULT_MASK BIT(3)
294#define DA9150_POR_FAULT_SHIFT 4
295#define DA9150_POR_FAULT_MASK BIT(4)
296
297/* DA9150_FAULT_LOG_B = 0x077 */
298#define DA9150_VBUS_FAULT_SHIFT 0
299#define DA9150_VBUS_FAULT_MASK BIT(0)
300#define DA9150_OTG_FAULT_SHIFT 1
301#define DA9150_OTG_FAULT_MASK BIT(1)
302
303/* DA9150_EVENT_E = 0x078 */
304#define DA9150_E_VBUS_SHIFT 0
305#define DA9150_E_VBUS_MASK BIT(0)
306#define DA9150_E_CHG_SHIFT 1
307#define DA9150_E_CHG_MASK BIT(1)
308#define DA9150_E_TCLASS_SHIFT 2
309#define DA9150_E_TCLASS_MASK BIT(2)
310#define DA9150_E_TJUNC_SHIFT 3
311#define DA9150_E_TJUNC_MASK BIT(3)
312#define DA9150_E_VFAULT_SHIFT 4
313#define DA9150_E_VFAULT_MASK BIT(4)
314#define DA9150_EVENTS_H_SHIFT 5
315#define DA9150_EVENTS_H_MASK BIT(5)
316#define DA9150_EVENTS_G_SHIFT 6
317#define DA9150_EVENTS_G_MASK BIT(6)
318#define DA9150_EVENTS_F_SHIFT 7
319#define DA9150_EVENTS_F_MASK BIT(7)
320
321/* DA9150_EVENT_F = 0x079 */
322#define DA9150_E_CONF_SHIFT 0
323#define DA9150_E_CONF_MASK BIT(0)
324#define DA9150_E_DAT_SHIFT 1
325#define DA9150_E_DAT_MASK BIT(1)
326#define DA9150_E_DTYPE_SHIFT 3
327#define DA9150_E_DTYPE_MASK BIT(3)
328#define DA9150_E_ID_SHIFT 4
329#define DA9150_E_ID_MASK BIT(4)
330#define DA9150_E_ADP_SHIFT 5
331#define DA9150_E_ADP_MASK BIT(5)
332#define DA9150_E_SESS_END_SHIFT 6
333#define DA9150_E_SESS_END_MASK BIT(6)
334#define DA9150_E_SESS_VLD_SHIFT 7
335#define DA9150_E_SESS_VLD_MASK BIT(7)
336
337/* DA9150_EVENT_G = 0x07A */
338#define DA9150_E_FG_SHIFT 0
339#define DA9150_E_FG_MASK BIT(0)
340#define DA9150_E_GP_SHIFT 1
341#define DA9150_E_GP_MASK BIT(1)
342#define DA9150_E_TBAT_SHIFT 2
343#define DA9150_E_TBAT_MASK BIT(2)
344#define DA9150_E_GPIOA_SHIFT 3
345#define DA9150_E_GPIOA_MASK BIT(3)
346#define DA9150_E_GPIOB_SHIFT 4
347#define DA9150_E_GPIOB_MASK BIT(4)
348#define DA9150_E_GPIOC_SHIFT 5
349#define DA9150_E_GPIOC_MASK BIT(5)
350#define DA9150_E_GPIOD_SHIFT 6
351#define DA9150_E_GPIOD_MASK BIT(6)
352#define DA9150_E_GPADC_SHIFT 7
353#define DA9150_E_GPADC_MASK BIT(7)
354
355/* DA9150_EVENT_H = 0x07B */
356#define DA9150_E_WKUP_SHIFT 0
357#define DA9150_E_WKUP_MASK BIT(0)
358
359/* DA9150_IRQ_MASK_E = 0x07C */
360#define DA9150_M_VBUS_SHIFT 0
361#define DA9150_M_VBUS_MASK BIT(0)
362#define DA9150_M_CHG_SHIFT 1
363#define DA9150_M_CHG_MASK BIT(1)
364#define DA9150_M_TJUNC_SHIFT 3
365#define DA9150_M_TJUNC_MASK BIT(3)
366#define DA9150_M_VFAULT_SHIFT 4
367#define DA9150_M_VFAULT_MASK BIT(4)
368
369/* DA9150_IRQ_MASK_F = 0x07D */
370#define DA9150_M_CONF_SHIFT 0
371#define DA9150_M_CONF_MASK BIT(0)
372#define DA9150_M_DAT_SHIFT 1
373#define DA9150_M_DAT_MASK BIT(1)
374#define DA9150_M_DTYPE_SHIFT 3
375#define DA9150_M_DTYPE_MASK BIT(3)
376#define DA9150_M_ID_SHIFT 4
377#define DA9150_M_ID_MASK BIT(4)
378#define DA9150_M_ADP_SHIFT 5
379#define DA9150_M_ADP_MASK BIT(5)
380#define DA9150_M_SESS_END_SHIFT 6
381#define DA9150_M_SESS_END_MASK BIT(6)
382#define DA9150_M_SESS_VLD_SHIFT 7
383#define DA9150_M_SESS_VLD_MASK BIT(7)
384
385/* DA9150_IRQ_MASK_G = 0x07E */
386#define DA9150_M_FG_SHIFT 0
387#define DA9150_M_FG_MASK BIT(0)
388#define DA9150_M_GP_SHIFT 1
389#define DA9150_M_GP_MASK BIT(1)
390#define DA9150_M_TBAT_SHIFT 2
391#define DA9150_M_TBAT_MASK BIT(2)
392#define DA9150_M_GPIOA_SHIFT 3
393#define DA9150_M_GPIOA_MASK BIT(3)
394#define DA9150_M_GPIOB_SHIFT 4
395#define DA9150_M_GPIOB_MASK BIT(4)
396#define DA9150_M_GPIOC_SHIFT 5
397#define DA9150_M_GPIOC_MASK BIT(5)
398#define DA9150_M_GPIOD_SHIFT 6
399#define DA9150_M_GPIOD_MASK BIT(6)
400#define DA9150_M_GPADC_SHIFT 7
401#define DA9150_M_GPADC_MASK BIT(7)
402
403/* DA9150_IRQ_MASK_H = 0x07F */
404#define DA9150_M_WKUP_SHIFT 0
405#define DA9150_M_WKUP_MASK BIT(0)
406
407/* DA9150_PAGE_CON_1 = 0x080 */
408#define DA9150_PAGE_SHIFT 0
409#define DA9150_PAGE_MASK (0x3f << 0)
410#define DA9150_WRITE_MODE_SHIFT 6
411#define DA9150_WRITE_MODE_MASK BIT(6)
412#define DA9150_REVERT_SHIFT 7
413#define DA9150_REVERT_MASK BIT(7)
414
415/* DA9150_CONFIG_A = 0x0E0 */
416#define DA9150_RESET_DUR_SHIFT 0
417#define DA9150_RESET_DUR_MASK (0x03 << 0)
418#define DA9150_RESET_EXT_SHIFT 2
419#define DA9150_RESET_EXT_MASK (0x03 << 2)
420#define DA9150_START_MAX_SHIFT 4
421#define DA9150_START_MAX_MASK (0x03 << 4)
422#define DA9150_PS_WAIT_EN_SHIFT 6
423#define DA9150_PS_WAIT_EN_MASK BIT(6)
424#define DA9150_PS_DISABLE_DIRECT_SHIFT 7
425#define DA9150_PS_DISABLE_DIRECT_MASK BIT(7)
426
427/* DA9150_CONFIG_B = 0x0E1 */
428#define DA9150_VFAULT_ADJ_SHIFT 0
429#define DA9150_VFAULT_ADJ_MASK (0x0f << 0)
430#define DA9150_VFAULT_HYST_SHIFT 4
431#define DA9150_VFAULT_HYST_MASK (0x07 << 4)
432#define DA9150_VFAULT_EN_SHIFT 7
433#define DA9150_VFAULT_EN_MASK BIT(7)
434
435/* DA9150_CONFIG_C = 0x0E2 */
436#define DA9150_VSYS_MIN_SHIFT 3
437#define DA9150_VSYS_MIN_MASK (0x1f << 3)
438
439/* DA9150_CONFIG_D = 0x0E3 */
440#define DA9150_LFOSC_EXT_SHIFT 0
441#define DA9150_LFOSC_EXT_MASK BIT(0)
442#define DA9150_VDD33_DWN_SHIFT 1
443#define DA9150_VDD33_DWN_MASK BIT(1)
444#define DA9150_WKUP_PM_EN_SHIFT 2
445#define DA9150_WKUP_PM_EN_MASK BIT(2)
446#define DA9150_WKUP_CE_SEL_SHIFT 3
447#define DA9150_WKUP_CE_SEL_MASK (0x03 << 3)
448#define DA9150_WKUP_CLK32K_EN_SHIFT 5
449#define DA9150_WKUP_CLK32K_EN_MASK BIT(5)
450#define DA9150_DISABLE_DEL_SHIFT 7
451#define DA9150_DISABLE_DEL_MASK BIT(7)
452
453/* DA9150_CONFIG_E = 0x0E4 */
454#define DA9150_PM_SPKSUP_DIS_SHIFT 0
455#define DA9150_PM_SPKSUP_DIS_MASK BIT(0)
456#define DA9150_PM_MERGE_SHIFT 1
457#define DA9150_PM_MERGE_MASK BIT(1)
458#define DA9150_PM_SR_OFF_SHIFT 2
459#define DA9150_PM_SR_OFF_MASK BIT(2)
460#define DA9150_PM_TIMEOUT_EN_SHIFT 3
461#define DA9150_PM_TIMEOUT_EN_MASK BIT(3)
462#define DA9150_PM_DLY_SEL_SHIFT 4
463#define DA9150_PM_DLY_SEL_MASK (0x07 << 4)
464#define DA9150_PM_OUT_DLY_SEL_SHIFT 7
465#define DA9150_PM_OUT_DLY_SEL_MASK BIT(7)
466
467/* DA9150_CONTROL_A = 0x0E5 */
468#define DA9150_VDD33_SL_SHIFT 0
469#define DA9150_VDD33_SL_MASK BIT(0)
470#define DA9150_VDD33_LPM_SHIFT 1
471#define DA9150_VDD33_LPM_MASK (0x03 << 1)
472#define DA9150_VDD33_EN_SHIFT 3
473#define DA9150_VDD33_EN_MASK BIT(3)
474#define DA9150_GPI_LPM_SHIFT 6
475#define DA9150_GPI_LPM_MASK BIT(6)
476#define DA9150_PM_IF_LPM_SHIFT 7
477#define DA9150_PM_IF_LPM_MASK BIT(7)
478
479/* DA9150_CONTROL_B = 0x0E6 */
480#define DA9150_LPM_SHIFT 0
481#define DA9150_LPM_MASK BIT(0)
482#define DA9150_RESET_SHIFT 1
483#define DA9150_RESET_MASK BIT(1)
484#define DA9150_RESET_USRCONF_EN_SHIFT 2
485#define DA9150_RESET_USRCONF_EN_MASK BIT(2)
486
487/* DA9150_CONTROL_C = 0x0E7 */
488#define DA9150_DISABLE_SHIFT 0
489#define DA9150_DISABLE_MASK BIT(0)
490
491/* DA9150_GPIO_A_B = 0x0E8 */
492#define DA9150_GPIOA_PIN_SHIFT 0
493#define DA9150_GPIOA_PIN_MASK (0x07 << 0)
494#define DA9150_GPIOA_PIN_GPI (0x00 << 0)
495#define DA9150_GPIOA_PIN_GPO_OD BIT(0)
496#define DA9150_GPIOA_TYPE_SHIFT 3
497#define DA9150_GPIOA_TYPE_MASK BIT(3)
498#define DA9150_GPIOB_PIN_SHIFT 4
499#define DA9150_GPIOB_PIN_MASK (0x07 << 4)
500#define DA9150_GPIOB_PIN_GPI (0x00 << 4)
501#define DA9150_GPIOB_PIN_GPO_OD BIT(4)
502#define DA9150_GPIOB_TYPE_SHIFT 7
503#define DA9150_GPIOB_TYPE_MASK BIT(7)
504
505/* DA9150_GPIO_C_D = 0x0E9 */
506#define DA9150_GPIOC_PIN_SHIFT 0
507#define DA9150_GPIOC_PIN_MASK (0x07 << 0)
508#define DA9150_GPIOC_PIN_GPI (0x00 << 0)
509#define DA9150_GPIOC_PIN_GPO_OD BIT(0)
510#define DA9150_GPIOC_TYPE_SHIFT 3
511#define DA9150_GPIOC_TYPE_MASK BIT(3)
512#define DA9150_GPIOD_PIN_SHIFT 4
513#define DA9150_GPIOD_PIN_MASK (0x07 << 4)
514#define DA9150_GPIOD_PIN_GPI (0x00 << 4)
515#define DA9150_GPIOD_PIN_GPO_OD BIT(4)
516#define DA9150_GPIOD_TYPE_SHIFT 7
517#define DA9150_GPIOD_TYPE_MASK BIT(7)
518
519/* DA9150_GPIO_MODE_CONT = 0x0EA */
520#define DA9150_GPIOA_MODE_SHIFT 0
521#define DA9150_GPIOA_MODE_MASK BIT(0)
522#define DA9150_GPIOB_MODE_SHIFT 1
523#define DA9150_GPIOB_MODE_MASK BIT(1)
524#define DA9150_GPIOC_MODE_SHIFT 2
525#define DA9150_GPIOC_MODE_MASK BIT(2)
526#define DA9150_GPIOD_MODE_SHIFT 3
527#define DA9150_GPIOD_MODE_MASK BIT(3)
528#define DA9150_GPIOA_CONT_SHIFT 4
529#define DA9150_GPIOA_CONT_MASK BIT(4)
530#define DA9150_GPIOB_CONT_SHIFT 5
531#define DA9150_GPIOB_CONT_MASK BIT(5)
532#define DA9150_GPIOC_CONT_SHIFT 6
533#define DA9150_GPIOC_CONT_MASK BIT(6)
534#define DA9150_GPIOD_CONT_SHIFT 7
535#define DA9150_GPIOD_CONT_MASK BIT(7)
536
537/* DA9150_GPIO_CTRL_B = 0x0EB */
538#define DA9150_WAKE_PIN_SHIFT 0
539#define DA9150_WAKE_PIN_MASK (0x03 << 0)
540#define DA9150_WAKE_MODE_SHIFT 2
541#define DA9150_WAKE_MODE_MASK BIT(2)
542#define DA9150_WAKE_CONT_SHIFT 3
543#define DA9150_WAKE_CONT_MASK BIT(3)
544#define DA9150_WAKE_DLY_SHIFT 4
545#define DA9150_WAKE_DLY_MASK BIT(4)
546
547/* DA9150_GPIO_CTRL_A = 0x0EC */
548#define DA9150_GPIOA_ANAEN_SHIFT 0
549#define DA9150_GPIOA_ANAEN_MASK BIT(0)
550#define DA9150_GPIOB_ANAEN_SHIFT 1
551#define DA9150_GPIOB_ANAEN_MASK BIT(1)
552#define DA9150_GPIOC_ANAEN_SHIFT 2
553#define DA9150_GPIOC_ANAEN_MASK BIT(2)
554#define DA9150_GPIOD_ANAEN_SHIFT 3
555#define DA9150_GPIOD_ANAEN_MASK BIT(3)
556#define DA9150_GPIO_ANAEN 0x01
557#define DA9150_GPIO_ANAEN_MASK 0x0F
558#define DA9150_CHGLED_PIN_SHIFT 5
559#define DA9150_CHGLED_PIN_MASK (0x07 << 5)
560
561/* DA9150_GPIO_CTRL_C = 0x0ED */
562#define DA9150_CHGBL_DUR_SHIFT 0
563#define DA9150_CHGBL_DUR_MASK (0x03 << 0)
564#define DA9150_CHGBL_DBL_SHIFT 2
565#define DA9150_CHGBL_DBL_MASK BIT(2)
566#define DA9150_CHGBL_FRQ_SHIFT 3
567#define DA9150_CHGBL_FRQ_MASK (0x03 << 3)
568#define DA9150_CHGBL_FLKR_SHIFT 5
569#define DA9150_CHGBL_FLKR_MASK BIT(5)
570
571/* DA9150_GPIO_CFG_A = 0x0EE */
572#define DA9150_CE_LPM_DEB_SHIFT 0
573#define DA9150_CE_LPM_DEB_MASK (0x07 << 0)
574
575/* DA9150_GPIO_CFG_B = 0x0EF */
576#define DA9150_GPIOA_PUPD_SHIFT 0
577#define DA9150_GPIOA_PUPD_MASK BIT(0)
578#define DA9150_GPIOB_PUPD_SHIFT 1
579#define DA9150_GPIOB_PUPD_MASK BIT(1)
580#define DA9150_GPIOC_PUPD_SHIFT 2
581#define DA9150_GPIOC_PUPD_MASK BIT(2)
582#define DA9150_GPIOD_PUPD_SHIFT 3
583#define DA9150_GPIOD_PUPD_MASK BIT(3)
584#define DA9150_GPIO_PUPD_MASK (0xF << 0)
585#define DA9150_GPI_DEB_SHIFT 4
586#define DA9150_GPI_DEB_MASK (0x07 << 4)
587#define DA9150_LPM_EN_SHIFT 7
588#define DA9150_LPM_EN_MASK BIT(7)
589
590/* DA9150_GPIO_CFG_C = 0x0F0 */
591#define DA9150_GPI_V_SHIFT 0
592#define DA9150_GPI_V_MASK BIT(0)
593#define DA9150_VDDIO_INT_SHIFT 1
594#define DA9150_VDDIO_INT_MASK BIT(1)
595#define DA9150_FAULT_PIN_SHIFT 3
596#define DA9150_FAULT_PIN_MASK (0x07 << 3)
597#define DA9150_FAULT_TYPE_SHIFT 6
598#define DA9150_FAULT_TYPE_MASK BIT(6)
599#define DA9150_NIRQ_PUPD_SHIFT 7
600#define DA9150_NIRQ_PUPD_MASK BIT(7)
601
602/* DA9150_GPADC_MAN = 0x0F2 */
603#define DA9150_GPADC_EN_SHIFT 0
604#define DA9150_GPADC_EN_MASK BIT(0)
605#define DA9150_GPADC_MUX_SHIFT 1
606#define DA9150_GPADC_MUX_MASK (0x1f << 1)
607
608/* DA9150_GPADC_RES_A = 0x0F4 */
609#define DA9150_GPADC_RES_H_SHIFT 0
610#define DA9150_GPADC_RES_H_MASK (0xff << 0)
611
612/* DA9150_GPADC_RES_B = 0x0F5 */
613#define DA9150_GPADC_RUN_SHIFT 0
614#define DA9150_GPADC_RUN_MASK BIT(0)
615#define DA9150_GPADC_RES_L_SHIFT 6
616#define DA9150_GPADC_RES_L_MASK (0x03 << 6)
617#define DA9150_GPADC_RES_L_BITS 2
618
619/* DA9150_PAGE_CON_2 = 0x100 */
620#define DA9150_PAGE_SHIFT 0
621#define DA9150_PAGE_MASK (0x3f << 0)
622#define DA9150_WRITE_MODE_SHIFT 6
623#define DA9150_WRITE_MODE_MASK BIT(6)
624#define DA9150_REVERT_SHIFT 7
625#define DA9150_REVERT_MASK BIT(7)
626
627/* DA9150_OTP_CONT_SHARED = 0x101 */
628#define DA9150_PC_DONE_SHIFT 3
629#define DA9150_PC_DONE_MASK BIT(3)
630
631/* DA9150_INTERFACE_SHARED = 0x105 */
632#define DA9150_IF_BASE_ADDR_SHIFT 4
633#define DA9150_IF_BASE_ADDR_MASK (0x0f << 4)
634
635/* DA9150_CONFIG_A_SHARED = 0x106 */
636#define DA9150_NIRQ_VDD_SHIFT 1
637#define DA9150_NIRQ_VDD_MASK BIT(1)
638#define DA9150_NIRQ_PIN_SHIFT 2
639#define DA9150_NIRQ_PIN_MASK BIT(2)
640#define DA9150_NIRQ_TYPE_SHIFT 3
641#define DA9150_NIRQ_TYPE_MASK BIT(3)
642#define DA9150_PM_IF_V_SHIFT 4
643#define DA9150_PM_IF_V_MASK BIT(4)
644#define DA9150_PM_IF_FMP_SHIFT 5
645#define DA9150_PM_IF_FMP_MASK BIT(5)
646#define DA9150_PM_IF_HSM_SHIFT 6
647#define DA9150_PM_IF_HSM_MASK BIT(6)
648
649/* DA9150_CONFIG_D_SHARED = 0x109 */
650#define DA9150_NIRQ_MODE_SHIFT 1
651#define DA9150_NIRQ_MODE_MASK BIT(1)
652
653/* DA9150_ADETVB_CFG_C = 0x150 */
654#define DA9150_TADP_RISE_SHIFT 0
655#define DA9150_TADP_RISE_MASK (0xff << 0)
656
657/* DA9150_ADETD_STAT = 0x151 */
658#define DA9150_DCD_STAT_SHIFT 0
659#define DA9150_DCD_STAT_MASK BIT(0)
660#define DA9150_PCD_STAT_SHIFT 1
661#define DA9150_PCD_STAT_MASK (0x03 << 1)
662#define DA9150_SCD_STAT_SHIFT 3
663#define DA9150_SCD_STAT_MASK (0x03 << 3)
664#define DA9150_DP_STAT_SHIFT 5
665#define DA9150_DP_STAT_MASK BIT(5)
666#define DA9150_DM_STAT_SHIFT 6
667#define DA9150_DM_STAT_MASK BIT(6)
668
669/* DA9150_ADET_CMPSTAT = 0x152 */
670#define DA9150_DP_COMP_SHIFT 1
671#define DA9150_DP_COMP_MASK BIT(1)
672#define DA9150_DM_COMP_SHIFT 2
673#define DA9150_DM_COMP_MASK BIT(2)
674#define DA9150_ADP_SNS_COMP_SHIFT 3
675#define DA9150_ADP_SNS_COMP_MASK BIT(3)
676#define DA9150_ADP_PRB_COMP_SHIFT 4
677#define DA9150_ADP_PRB_COMP_MASK BIT(4)
678#define DA9150_ID_COMP_SHIFT 5
679#define DA9150_ID_COMP_MASK BIT(5)
680
681/* DA9150_ADET_CTRL_A = 0x153 */
682#define DA9150_AID_DAT_SHIFT 0
683#define DA9150_AID_DAT_MASK BIT(0)
684#define DA9150_AID_ID_SHIFT 1
685#define DA9150_AID_ID_MASK BIT(1)
686#define DA9150_AID_TRIG_SHIFT 2
687#define DA9150_AID_TRIG_MASK BIT(2)
688
689/* DA9150_ADETVB_CFG_B = 0x154 */
690#define DA9150_VB_MODE_SHIFT 0
691#define DA9150_VB_MODE_MASK (0x03 << 0)
692#define DA9150_VB_MODE_VB_SESS BIT(0)
693
694#define DA9150_TADP_PRB_SHIFT 2
695#define DA9150_TADP_PRB_MASK BIT(2)
696#define DA9150_DAT_RPD_EXT_SHIFT 5
697#define DA9150_DAT_RPD_EXT_MASK BIT(5)
698#define DA9150_CONF_RPD_SHIFT 6
699#define DA9150_CONF_RPD_MASK BIT(6)
700#define DA9150_CONF_SRP_SHIFT 7
701#define DA9150_CONF_SRP_MASK BIT(7)
702
703/* DA9150_ADETVB_CFG_A = 0x155 */
704#define DA9150_AID_MODE_SHIFT 0
705#define DA9150_AID_MODE_MASK (0x03 << 0)
706#define DA9150_AID_EXT_POL_SHIFT 2
707#define DA9150_AID_EXT_POL_MASK BIT(2)
708
709/* DA9150_ADETAC_CFG_A = 0x156 */
710#define DA9150_ISET_CDP_SHIFT 0
711#define DA9150_ISET_CDP_MASK (0x1f << 0)
712#define DA9150_CONF_DBP_SHIFT 5
713#define DA9150_CONF_DBP_MASK BIT(5)
714
715/* DA9150_ADDETAC_CFG_B = 0x157 */
716#define DA9150_ISET_DCHG_SHIFT 0
717#define DA9150_ISET_DCHG_MASK (0x1f << 0)
718#define DA9150_CONF_GPIOA_SHIFT 5
719#define DA9150_CONF_GPIOA_MASK BIT(5)
720#define DA9150_CONF_GPIOB_SHIFT 6
721#define DA9150_CONF_GPIOB_MASK BIT(6)
722#define DA9150_AID_VB_SHIFT 7
723#define DA9150_AID_VB_MASK BIT(7)
724
725/* DA9150_ADETAC_CFG_C = 0x158 */
726#define DA9150_ISET_DEF_SHIFT 0
727#define DA9150_ISET_DEF_MASK (0x1f << 0)
728#define DA9150_CONF_MODE_SHIFT 5
729#define DA9150_CONF_MODE_MASK (0x03 << 5)
730#define DA9150_AID_CR_DIS_SHIFT 7
731#define DA9150_AID_CR_DIS_MASK BIT(7)
732
733/* DA9150_ADETAC_CFG_D = 0x159 */
734#define DA9150_ISET_UNIT_SHIFT 0
735#define DA9150_ISET_UNIT_MASK (0x1f << 0)
736#define DA9150_AID_UNCLAMP_SHIFT 5
737#define DA9150_AID_UNCLAMP_MASK BIT(5)
738
739/* DA9150_ADETVB_CFG_D = 0x15A */
740#define DA9150_ID_MODE_SHIFT 0
741#define DA9150_ID_MODE_MASK (0x03 << 0)
742#define DA9150_DAT_MODE_SHIFT 2
743#define DA9150_DAT_MODE_MASK (0x0f << 2)
744#define DA9150_DAT_SWP_SHIFT 6
745#define DA9150_DAT_SWP_MASK BIT(6)
746#define DA9150_DAT_CLAMP_EXT_SHIFT 7
747#define DA9150_DAT_CLAMP_EXT_MASK BIT(7)
748
749/* DA9150_ADETID_CFG_A = 0x15B */
750#define DA9150_TID_POLL_SHIFT 0
751#define DA9150_TID_POLL_MASK (0x07 << 0)
752#define DA9150_RID_CONV_SHIFT 3
753#define DA9150_RID_CONV_MASK BIT(3)
754
755/* DA9150_ADET_RID_PT_CHG_H = 0x15C */
756#define DA9150_RID_PT_CHG_H_SHIFT 0
757#define DA9150_RID_PT_CHG_H_MASK (0xff << 0)
758
759/* DA9150_ADET_RID_PT_CHG_L = 0x15D */
760#define DA9150_RID_PT_CHG_L_SHIFT 6
761#define DA9150_RID_PT_CHG_L_MASK (0x03 << 6)
762
763/* DA9150_PPR_TCTR_B = 0x160 */
764#define DA9150_CHG_TCTR_VAL_SHIFT 0
765#define DA9150_CHG_TCTR_VAL_MASK (0xff << 0)
766
767/* DA9150_PPR_BKCTRL_A = 0x163 */
768#define DA9150_VBUS_MODE_SHIFT 0
769#define DA9150_VBUS_MODE_MASK (0x03 << 0)
770#define DA9150_VBUS_MODE_CHG BIT(0)
771#define DA9150_VBUS_MODE_OTG (0x02 << 0)
772#define DA9150_VBUS_LPM_SHIFT 2
773#define DA9150_VBUS_LPM_MASK (0x03 << 2)
774#define DA9150_VBUS_SUSP_SHIFT 4
775#define DA9150_VBUS_SUSP_MASK BIT(4)
776#define DA9150_VBUS_PWM_SHIFT 5
777#define DA9150_VBUS_PWM_MASK BIT(5)
778#define DA9150_VBUS_ISO_SHIFT 6
779#define DA9150_VBUS_ISO_MASK BIT(6)
780#define DA9150_VBUS_LDO_SHIFT 7
781#define DA9150_VBUS_LDO_MASK BIT(7)
782
783/* DA9150_PPR_BKCFG_A = 0x164 */
784#define DA9150_VBUS_ISET_SHIFT 0
785#define DA9150_VBUS_ISET_MASK (0x1f << 0)
786#define DA9150_VBUS_IMAX_SHIFT 5
787#define DA9150_VBUS_IMAX_MASK BIT(5)
788#define DA9150_VBUS_IOTG_SHIFT 6
789#define DA9150_VBUS_IOTG_MASK (0x03 << 6)
790
791/* DA9150_PPR_BKCFG_B = 0x165 */
792#define DA9150_VBUS_DROP_SHIFT 0
793#define DA9150_VBUS_DROP_MASK (0x0f << 0)
794#define DA9150_VBUS_FAULT_DIS_SHIFT 6
795#define DA9150_VBUS_FAULT_DIS_MASK BIT(6)
796#define DA9150_OTG_FAULT_DIS_SHIFT 7
797#define DA9150_OTG_FAULT_DIS_MASK BIT(7)
798
799/* DA9150_PPR_CHGCTRL_A = 0x166 */
800#define DA9150_CHG_EN_SHIFT 0
801#define DA9150_CHG_EN_MASK BIT(0)
802
803/* DA9150_PPR_CHGCTRL_B = 0x167 */
804#define DA9150_CHG_VBAT_SHIFT 0
805#define DA9150_CHG_VBAT_MASK (0x1f << 0)
806#define DA9150_CHG_VDROP_SHIFT 6
807#define DA9150_CHG_VDROP_MASK (0x03 << 6)
808
809/* DA9150_PPR_CHGCTRL_C = 0x168 */
810#define DA9150_CHG_VFAULT_SHIFT 0
811#define DA9150_CHG_VFAULT_MASK (0x0f << 0)
812#define DA9150_CHG_IPRE_SHIFT 4
813#define DA9150_CHG_IPRE_MASK (0x03 << 4)
814
815/* DA9150_PPR_TCTR_A = 0x169 */
816#define DA9150_CHG_TCTR_SHIFT 0
817#define DA9150_CHG_TCTR_MASK (0x07 << 0)
818#define DA9150_CHG_TCTR_MODE_SHIFT 4
819#define DA9150_CHG_TCTR_MODE_MASK BIT(4)
820
821/* DA9150_PPR_CHGCTRL_D = 0x16A */
822#define DA9150_CHG_IBAT_SHIFT 0
823#define DA9150_CHG_IBAT_MASK (0xff << 0)
824
825/* DA9150_PPR_CHGCTRL_E = 0x16B */
826#define DA9150_CHG_IEND_SHIFT 0
827#define DA9150_CHG_IEND_MASK (0xff << 0)
828
829/* DA9150_PPR_CHGCTRL_F = 0x16C */
830#define DA9150_CHG_VCOLD_SHIFT 0
831#define DA9150_CHG_VCOLD_MASK (0x1f << 0)
832#define DA9150_TBAT_TQA_EN_SHIFT 6
833#define DA9150_TBAT_TQA_EN_MASK BIT(6)
834#define DA9150_TBAT_TDP_EN_SHIFT 7
835#define DA9150_TBAT_TDP_EN_MASK BIT(7)
836
837/* DA9150_PPR_CHGCTRL_G = 0x16D */
838#define DA9150_CHG_VWARM_SHIFT 0
839#define DA9150_CHG_VWARM_MASK (0x1f << 0)
840
841/* DA9150_PPR_CHGCTRL_H = 0x16E */
842#define DA9150_CHG_VHOT_SHIFT 0
843#define DA9150_CHG_VHOT_MASK (0x1f << 0)
844
845/* DA9150_PPR_CHGCTRL_I = 0x16F */
846#define DA9150_CHG_ICOLD_SHIFT 0
847#define DA9150_CHG_ICOLD_MASK (0xff << 0)
848
849/* DA9150_PPR_CHGCTRL_J = 0x170 */
850#define DA9150_CHG_IWARM_SHIFT 0
851#define DA9150_CHG_IWARM_MASK (0xff << 0)
852
853/* DA9150_PPR_CHGCTRL_K = 0x171 */
854#define DA9150_CHG_IHOT_SHIFT 0
855#define DA9150_CHG_IHOT_MASK (0xff << 0)
856
857/* DA9150_PPR_CHGCTRL_L = 0x172 */
858#define DA9150_CHG_IBAT_TRED_SHIFT 0
859#define DA9150_CHG_IBAT_TRED_MASK (0xff << 0)
860
861/* DA9150_PPR_CHGCTRL_M = 0x173 */
862#define DA9150_CHG_VFLOAT_SHIFT 0
863#define DA9150_CHG_VFLOAT_MASK (0x0f << 0)
864#define DA9150_CHG_LPM_SHIFT 5
865#define DA9150_CHG_LPM_MASK BIT(5)
866#define DA9150_CHG_NBLO_SHIFT 6
867#define DA9150_CHG_NBLO_MASK BIT(6)
868#define DA9150_EBS_EN_SHIFT 7
869#define DA9150_EBS_EN_MASK BIT(7)
870
871/* DA9150_PPR_THYST_A = 0x174 */
872#define DA9150_TBAT_T1_SHIFT 0
873#define DA9150_TBAT_T1_MASK (0xff << 0)
874
875/* DA9150_PPR_THYST_B = 0x175 */
876#define DA9150_TBAT_T2_SHIFT 0
877#define DA9150_TBAT_T2_MASK (0xff << 0)
878
879/* DA9150_PPR_THYST_C = 0x176 */
880#define DA9150_TBAT_T3_SHIFT 0
881#define DA9150_TBAT_T3_MASK (0xff << 0)
882
883/* DA9150_PPR_THYST_D = 0x177 */
884#define DA9150_TBAT_T4_SHIFT 0
885#define DA9150_TBAT_T4_MASK (0xff << 0)
886
887/* DA9150_PPR_THYST_E = 0x178 */
888#define DA9150_TBAT_T5_SHIFT 0
889#define DA9150_TBAT_T5_MASK (0xff << 0)
890
891/* DA9150_PPR_THYST_F = 0x179 */
892#define DA9150_TBAT_H1_SHIFT 0
893#define DA9150_TBAT_H1_MASK (0xff << 0)
894
895/* DA9150_PPR_THYST_G = 0x17A */
896#define DA9150_TBAT_H5_SHIFT 0
897#define DA9150_TBAT_H5_MASK (0xff << 0)
898
899/* DA9150_PAGE_CON_3 = 0x180 */
900#define DA9150_PAGE_SHIFT 0
901#define DA9150_PAGE_MASK (0x3f << 0)
902#define DA9150_WRITE_MODE_SHIFT 6
903#define DA9150_WRITE_MODE_MASK BIT(6)
904#define DA9150_REVERT_SHIFT 7
905#define DA9150_REVERT_MASK BIT(7)
906
907/* DA9150_PAGE_CON_4 = 0x200 */
908#define DA9150_PAGE_SHIFT 0
909#define DA9150_PAGE_MASK (0x3f << 0)
910#define DA9150_WRITE_MODE_SHIFT 6
911#define DA9150_WRITE_MODE_MASK BIT(6)
912#define DA9150_REVERT_SHIFT 7
913#define DA9150_REVERT_MASK BIT(7)
914
915/* DA9150_PAGE_CON_5 = 0x280 */
916#define DA9150_PAGE_SHIFT 0
917#define DA9150_PAGE_MASK (0x3f << 0)
918#define DA9150_WRITE_MODE_SHIFT 6
919#define DA9150_WRITE_MODE_MASK BIT(6)
920#define DA9150_REVERT_SHIFT 7
921#define DA9150_REVERT_MASK BIT(7)
922
923/* DA9150_PAGE_CON_6 = 0x300 */
924#define DA9150_PAGE_SHIFT 0
925#define DA9150_PAGE_MASK (0x3f << 0)
926#define DA9150_WRITE_MODE_SHIFT 6
927#define DA9150_WRITE_MODE_MASK BIT(6)
928#define DA9150_REVERT_SHIFT 7
929#define DA9150_REVERT_MASK BIT(7)
930
931/* DA9150_COREBTLD_STAT_A = 0x302 */
932#define DA9150_BOOTLD_STAT_SHIFT 0
933#define DA9150_BOOTLD_STAT_MASK (0x03 << 0)
934#define DA9150_CORE_LOCKUP_SHIFT 2
935#define DA9150_CORE_LOCKUP_MASK BIT(2)
936
937/* DA9150_COREBTLD_CTRL_A = 0x303 */
938#define DA9150_CORE_RESET_SHIFT 0
939#define DA9150_CORE_RESET_MASK BIT(0)
940#define DA9150_CORE_STOP_SHIFT 1
941#define DA9150_CORE_STOP_MASK BIT(1)
942
943/* DA9150_CORE_CONFIG_A = 0x304 */
944#define DA9150_CORE_MEMMUX_SHIFT 0
945#define DA9150_CORE_MEMMUX_MASK (0x03 << 0)
946#define DA9150_WDT_AUTO_START_SHIFT 2
947#define DA9150_WDT_AUTO_START_MASK BIT(2)
948#define DA9150_WDT_AUTO_LOCK_SHIFT 3
949#define DA9150_WDT_AUTO_LOCK_MASK BIT(3)
950#define DA9150_WDT_HLT_NO_CLK_SHIFT 4
951#define DA9150_WDT_HLT_NO_CLK_MASK BIT(4)
952
953/* DA9150_CORE_CONFIG_C = 0x305 */
954#define DA9150_CORE_SW_SIZE_SHIFT 0
955#define DA9150_CORE_SW_SIZE_MASK (0xff << 0)
956
957/* DA9150_CORE_CONFIG_B = 0x306 */
958#define DA9150_BOOTLD_EN_SHIFT 0
959#define DA9150_BOOTLD_EN_MASK BIT(0)
960#define DA9150_CORE_EN_SHIFT 2
961#define DA9150_CORE_EN_MASK BIT(2)
962#define DA9150_CORE_SW_SRC_SHIFT 3
963#define DA9150_CORE_SW_SRC_MASK (0x07 << 3)
964#define DA9150_DEEP_SLEEP_EN_SHIFT 7
965#define DA9150_DEEP_SLEEP_EN_MASK BIT(7)
966
967/* DA9150_CORE_CFG_DATA_A = 0x307 */
968#define DA9150_CORE_CFG_DT_A_SHIFT 0
969#define DA9150_CORE_CFG_DT_A_MASK (0xff << 0)
970
971/* DA9150_CORE_CFG_DATA_B = 0x308 */
972#define DA9150_CORE_CFG_DT_B_SHIFT 0
973#define DA9150_CORE_CFG_DT_B_MASK (0xff << 0)
974
975/* DA9150_CORE_CMD_A = 0x309 */
976#define DA9150_CORE_CMD_SHIFT 0
977#define DA9150_CORE_CMD_MASK (0xff << 0)
978
979/* DA9150_CORE_DATA_A = 0x30A */
980#define DA9150_CORE_DATA_0_SHIFT 0
981#define DA9150_CORE_DATA_0_MASK (0xff << 0)
982
983/* DA9150_CORE_DATA_B = 0x30B */
984#define DA9150_CORE_DATA_1_SHIFT 0
985#define DA9150_CORE_DATA_1_MASK (0xff << 0)
986
987/* DA9150_CORE_DATA_C = 0x30C */
988#define DA9150_CORE_DATA_2_SHIFT 0
989#define DA9150_CORE_DATA_2_MASK (0xff << 0)
990
991/* DA9150_CORE_DATA_D = 0x30D */
992#define DA9150_CORE_DATA_3_SHIFT 0
993#define DA9150_CORE_DATA_3_MASK (0xff << 0)
994
995/* DA9150_CORE2WIRE_STAT_A = 0x310 */
996#define DA9150_FW_FWDL_ERR_SHIFT 7
997#define DA9150_FW_FWDL_ERR_MASK BIT(7)
998
999/* DA9150_CORE2WIRE_CTRL_A = 0x311 */
1000#define DA9150_FW_FWDL_EN_SHIFT 0
1001#define DA9150_FW_FWDL_EN_MASK BIT(0)
1002#define DA9150_FG_QIF_EN_SHIFT 1
1003#define DA9150_FG_QIF_EN_MASK BIT(1)
1004#define DA9150_CORE_BASE_ADDR_SHIFT 4
1005#define DA9150_CORE_BASE_ADDR_MASK (0x0f << 4)
1006
1007/* DA9150_FW_CTRL_A = 0x312 */
1008#define DA9150_FW_SEAL_SHIFT 0
1009#define DA9150_FW_SEAL_MASK (0xff << 0)
1010
1011/* DA9150_FW_CTRL_C = 0x313 */
1012#define DA9150_FW_FWDL_CRC_SHIFT 0
1013#define DA9150_FW_FWDL_CRC_MASK (0xff << 0)
1014
1015/* DA9150_FW_CTRL_D = 0x314 */
1016#define DA9150_FW_FWDL_BASE_SHIFT 0
1017#define DA9150_FW_FWDL_BASE_MASK (0x0f << 0)
1018
1019/* DA9150_FG_CTRL_A = 0x315 */
1020#define DA9150_FG_QIF_CODE_SHIFT 0
1021#define DA9150_FG_QIF_CODE_MASK (0xff << 0)
1022
1023/* DA9150_FG_CTRL_B = 0x316 */
1024#define DA9150_FG_QIF_VALUE_SHIFT 0
1025#define DA9150_FG_QIF_VALUE_MASK (0xff << 0)
1026
1027/* DA9150_FW_CTRL_E = 0x317 */
1028#define DA9150_FW_FWDL_SEG_SHIFT 0
1029#define DA9150_FW_FWDL_SEG_MASK (0xff << 0)
1030
1031/* DA9150_FW_CTRL_B = 0x318 */
1032#define DA9150_FW_FWDL_VALUE_SHIFT 0
1033#define DA9150_FW_FWDL_VALUE_MASK (0xff << 0)
1034
1035/* DA9150_GPADC_CMAN = 0x320 */
1036#define DA9150_GPADC_CEN_SHIFT 0
1037#define DA9150_GPADC_CEN_MASK BIT(0)
1038#define DA9150_GPADC_CMUX_SHIFT 1
1039#define DA9150_GPADC_CMUX_MASK (0x1f << 1)
1040
1041/* DA9150_GPADC_CRES_A = 0x322 */
1042#define DA9150_GPADC_CRES_H_SHIFT 0
1043#define DA9150_GPADC_CRES_H_MASK (0xff << 0)
1044
1045/* DA9150_GPADC_CRES_B = 0x323 */
1046#define DA9150_GPADC_CRUN_SHIFT 0
1047#define DA9150_GPADC_CRUN_MASK BIT(0)
1048#define DA9150_GPADC_CRES_L_SHIFT 6
1049#define DA9150_GPADC_CRES_L_MASK (0x03 << 6)
1050
1051/* DA9150_CC_CFG_A = 0x328 */
1052#define DA9150_CC_EN_SHIFT 0
1053#define DA9150_CC_EN_MASK BIT(0)
1054#define DA9150_CC_TIMEBASE_SHIFT 1
1055#define DA9150_CC_TIMEBASE_MASK (0x03 << 1)
1056#define DA9150_CC_CFG_SHIFT 5
1057#define DA9150_CC_CFG_MASK (0x03 << 5)
1058#define DA9150_CC_ENDLESS_MODE_SHIFT 7
1059#define DA9150_CC_ENDLESS_MODE_MASK BIT(7)
1060
1061/* DA9150_CC_CFG_B = 0x329 */
1062#define DA9150_CC_OPT_SHIFT 0
1063#define DA9150_CC_OPT_MASK (0x03 << 0)
1064#define DA9150_CC_PREAMP_SHIFT 2
1065#define DA9150_CC_PREAMP_MASK (0x03 << 2)
1066
1067/* DA9150_CC_ICHG_RES_A = 0x32A */
1068#define DA9150_CC_ICHG_RES_H_SHIFT 0
1069#define DA9150_CC_ICHG_RES_H_MASK (0xff << 0)
1070
1071/* DA9150_CC_ICHG_RES_B = 0x32B */
1072#define DA9150_CC_ICHG_RES_L_SHIFT 3
1073#define DA9150_CC_ICHG_RES_L_MASK (0x1f << 3)
1074
1075/* DA9150_CC_IAVG_RES_A = 0x32C */
1076#define DA9150_CC_IAVG_RES_H_SHIFT 0
1077#define DA9150_CC_IAVG_RES_H_MASK (0xff << 0)
1078
1079/* DA9150_CC_IAVG_RES_B = 0x32D */
1080#define DA9150_CC_IAVG_RES_L_SHIFT 0
1081#define DA9150_CC_IAVG_RES_L_MASK (0xff << 0)
1082
1083/* DA9150_TAUX_CTRL_A = 0x330 */
1084#define DA9150_TAUX_EN_SHIFT 0
1085#define DA9150_TAUX_EN_MASK BIT(0)
1086#define DA9150_TAUX_MOD_SHIFT 1
1087#define DA9150_TAUX_MOD_MASK BIT(1)
1088#define DA9150_TAUX_UPDATE_SHIFT 2
1089#define DA9150_TAUX_UPDATE_MASK BIT(2)
1090
1091/* DA9150_TAUX_RELOAD_H = 0x332 */
1092#define DA9150_TAUX_RLD_H_SHIFT 0
1093#define DA9150_TAUX_RLD_H_MASK (0xff << 0)
1094
1095/* DA9150_TAUX_RELOAD_L = 0x333 */
1096#define DA9150_TAUX_RLD_L_SHIFT 3
1097#define DA9150_TAUX_RLD_L_MASK (0x1f << 3)
1098
1099/* DA9150_TAUX_VALUE_H = 0x334 */
1100#define DA9150_TAUX_VAL_H_SHIFT 0
1101#define DA9150_TAUX_VAL_H_MASK (0xff << 0)
1102
1103/* DA9150_TAUX_VALUE_L = 0x335 */
1104#define DA9150_TAUX_VAL_L_SHIFT 3
1105#define DA9150_TAUX_VAL_L_MASK (0x1f << 3)
1106
1107/* DA9150_AUX_DATA_0 = 0x338 */
1108#define DA9150_AUX_DAT_0_SHIFT 0
1109#define DA9150_AUX_DAT_0_MASK (0xff << 0)
1110
1111/* DA9150_AUX_DATA_1 = 0x339 */
1112#define DA9150_AUX_DAT_1_SHIFT 0
1113#define DA9150_AUX_DAT_1_MASK (0xff << 0)
1114
1115/* DA9150_AUX_DATA_2 = 0x33A */
1116#define DA9150_AUX_DAT_2_SHIFT 0
1117#define DA9150_AUX_DAT_2_MASK (0xff << 0)
1118
1119/* DA9150_AUX_DATA_3 = 0x33B */
1120#define DA9150_AUX_DAT_3_SHIFT 0
1121#define DA9150_AUX_DAT_3_MASK (0xff << 0)
1122
1123/* DA9150_BIF_CTRL = 0x340 */
1124#define DA9150_BIF_ISRC_EN_SHIFT 0
1125#define DA9150_BIF_ISRC_EN_MASK BIT(0)
1126
1127/* DA9150_TBAT_CTRL_A = 0x342 */
1128#define DA9150_TBAT_EN_SHIFT 0
1129#define DA9150_TBAT_EN_MASK BIT(0)
1130#define DA9150_TBAT_SW1_SHIFT 1
1131#define DA9150_TBAT_SW1_MASK BIT(1)
1132#define DA9150_TBAT_SW2_SHIFT 2
1133#define DA9150_TBAT_SW2_MASK BIT(2)
1134
1135/* DA9150_TBAT_CTRL_B = 0x343 */
1136#define DA9150_TBAT_SW_FRC_SHIFT 0
1137#define DA9150_TBAT_SW_FRC_MASK BIT(0)
1138#define DA9150_TBAT_STAT_SW1_SHIFT 1
1139#define DA9150_TBAT_STAT_SW1_MASK BIT(1)
1140#define DA9150_TBAT_STAT_SW2_SHIFT 2
1141#define DA9150_TBAT_STAT_SW2_MASK BIT(2)
1142#define DA9150_TBAT_HIGH_CURR_SHIFT 3
1143#define DA9150_TBAT_HIGH_CURR_MASK BIT(3)
1144
1145/* DA9150_TBAT_RES_A = 0x344 */
1146#define DA9150_TBAT_RES_H_SHIFT 0
1147#define DA9150_TBAT_RES_H_MASK (0xff << 0)
1148
1149/* DA9150_TBAT_RES_B = 0x345 */
1150#define DA9150_TBAT_RES_DIS_SHIFT 0
1151#define DA9150_TBAT_RES_DIS_MASK BIT(0)
1152#define DA9150_TBAT_RES_L_SHIFT 6
1153#define DA9150_TBAT_RES_L_MASK (0x03 << 6)
1154
1155#endif /* __DA9150_REGISTERS_H */
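
All of the fields above follow one pattern: value << *_SHIFT, masked by *_MASK. A generic read-modify-write sketch, using the VBUS current-limit field as an arbitrary example:

static u8 da9150_field_prep_example(u8 reg_val, u8 field_val)
{
	/* clear the field, then insert the new value at its shift */
	reg_val &= ~DA9150_VBUS_ISET_MASK;
	reg_val |= (field_val << DA9150_VBUS_ISET_SHIFT) & DA9150_VBUS_ISET_MASK;
	return reg_val;
}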
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index 960b92ad450d..f5043490d67c 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -447,7 +447,6 @@ struct max77686_dev {
 	struct regmap_irq_chip_data *rtc_irq_data;
 
 	int irq;
-	bool wakeup;
 	struct mutex irqlock;
 	int irq_masks_cur[MAX77686_IRQ_GROUP_NR];
 	int irq_masks_cache[MAX77686_IRQ_GROUP_NR];
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h
index 553f7d09258a..bb995ab9a575 100644
--- a/include/linux/mfd/max77686.h
+++ b/include/linux/mfd/max77686.h
@@ -119,12 +119,6 @@ enum max77802_regulators {
 	MAX77802_REG_MAX,
 };
 
-struct max77686_regulator_data {
-	int id;
-	struct regulator_init_data *initdata;
-	struct device_node *of_node;
-};
-
 enum max77686_opmode {
 	MAX77686_OPMODE_NORMAL,
 	MAX77686_OPMODE_LP,
@@ -136,26 +130,4 @@ struct max77686_opmode_data {
 	int mode;
 };
 
-struct max77686_platform_data {
-	int ono;
-	int wakeup;
-
-	/* ---- PMIC ---- */
-	struct max77686_regulator_data *regulators;
-	int num_regulators;
-
-	struct max77686_opmode_data *opmode_data;
-
-	/*
-	 * GPIO-DVS feature is not enabled with the current version of
-	 * MAX77686 driver. Buck2/3/4_voltages[0] is used as the default
-	 * voltage at probe. DVS/SELB gpios are set as OUTPUT-LOW.
-	 */
-	int buck234_gpio_dvs[3]; /* GPIO of [0]DVS1, [1]DVS2, [2]DVS3 */
-	int buck234_gpio_selb[3]; /* [0]SELB2, [1]SELB3, [2]SELB4 */
-	unsigned int buck2_voltage[8]; /* buckx_voltage in uV */
-	unsigned int buck3_voltage[8];
-	unsigned int buck4_voltage[8];
-};
-
 #endif /* __LINUX_MFD_MAX77686_H */
diff --git a/include/linux/mfd/qcom_rpm.h b/include/linux/mfd/qcom_rpm.h
new file mode 100644
index 000000000000..742ebf1b76ca
--- /dev/null
+++ b/include/linux/mfd/qcom_rpm.h
@@ -0,0 +1,13 @@
1#ifndef __QCOM_RPM_H__
2#define __QCOM_RPM_H__
3
4#include <linux/types.h>
5
6struct qcom_rpm;
7
8#define QCOM_RPM_ACTIVE_STATE 0
9#define QCOM_RPM_SLEEP_STATE 1
10
11int qcom_rpm_write(struct qcom_rpm *rpm, int state, int resource, u32 *buf, size_t count);
12
13#endif
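
A hedged usage sketch of the single exported call; resource identifiers are defined per SoC elsewhere, so RPM_EXAMPLE_RESOURCE below is a placeholder, not a real constant:

#include <linux/mfd/qcom_rpm.h>

#define RPM_EXAMPLE_RESOURCE	0	/* hypothetical resource id */

static int example_rpm_vote(struct qcom_rpm *rpm)
{
	u32 value = 1;

	/* one-word write against the active (vs. sleep) resource state */
	return qcom_rpm_write(rpm, QCOM_RPM_ACTIVE_STATE,
			      RPM_EXAMPLE_RESOURCE, &value, 1);
}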
diff --git a/include/linux/mfd/rt5033-private.h b/include/linux/mfd/rt5033-private.h
new file mode 100644
index 000000000000..1b63fc2f42d1
--- /dev/null
+++ b/include/linux/mfd/rt5033-private.h
@@ -0,0 +1,260 @@
1/*
2 * MFD core driver for Richtek RT5033
3 *
4 * Copyright (C) 2014 Samsung Electronics, Co., Ltd.
5 * Author: Beomho Seo <beomho.seo@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef __RT5033_PRIVATE_H__
13#define __RT5033_PRIVATE_H__
14
15enum rt5033_reg {
16 RT5033_REG_CHG_STAT = 0x00,
17 RT5033_REG_CHG_CTRL1 = 0x01,
18 RT5033_REG_CHG_CTRL2 = 0x02,
19 RT5033_REG_DEVICE_ID = 0x03,
20 RT5033_REG_CHG_CTRL3 = 0x04,
21 RT5033_REG_CHG_CTRL4 = 0x05,
22 RT5033_REG_CHG_CTRL5 = 0x06,
23 RT5033_REG_RT_CTRL0 = 0x07,
24 RT5033_REG_CHG_RESET = 0x08,
25 /* Reserved 0x09~0x18 */
26 RT5033_REG_RT_CTRL1 = 0x19,
27 /* Reserved 0x1A~0x20 */
28 RT5033_REG_FLED_FUNCTION1 = 0x21,
29 RT5033_REG_FLED_FUNCTION2 = 0x22,
30 RT5033_REG_FLED_STROBE_CTRL1 = 0x23,
31 RT5033_REG_FLED_STROBE_CTRL2 = 0x24,
32 RT5033_REG_FLED_CTRL1 = 0x25,
33 RT5033_REG_FLED_CTRL2 = 0x26,
34 RT5033_REG_FLED_CTRL3 = 0x27,
35 RT5033_REG_FLED_CTRL4 = 0x28,
36 RT5033_REG_FLED_CTRL5 = 0x29,
37 /* Reserved 0x2A~0x40 */
38 RT5033_REG_CTRL = 0x41,
39 RT5033_REG_BUCK_CTRL = 0x42,
40 RT5033_REG_LDO_CTRL = 0x43,
41 /* Reserved 0x44~0x46 */
42 RT5033_REG_MANUAL_RESET_CTRL = 0x47,
43 /* Reserved 0x48~0x5F */
44 RT5033_REG_CHG_IRQ1 = 0x60,
45 RT5033_REG_CHG_IRQ2 = 0x61,
46 RT5033_REG_CHG_IRQ3 = 0x62,
47 RT5033_REG_CHG_IRQ1_CTRL = 0x63,
48 RT5033_REG_CHG_IRQ2_CTRL = 0x64,
49 RT5033_REG_CHG_IRQ3_CTRL = 0x65,
50 RT5033_REG_LED_IRQ_STAT = 0x66,
51 RT5033_REG_LED_IRQ_CTRL = 0x67,
52 RT5033_REG_PMIC_IRQ_STAT = 0x68,
53 RT5033_REG_PMIC_IRQ_CTRL = 0x69,
54 RT5033_REG_SHDN_CTRL = 0x6A,
55 RT5033_REG_OFF_EVENT = 0x6B,
56
57 RT5033_REG_END,
58};
59
60/* RT5033 Charger state register */
61#define RT5033_CHG_STAT_MASK 0x20
62#define RT5033_CHG_STAT_DISCHARGING 0x00
63#define RT5033_CHG_STAT_FULL 0x10
64#define RT5033_CHG_STAT_CHARGING 0x20
65#define RT5033_CHG_STAT_NOT_CHARGING 0x30
66#define RT5033_CHG_STAT_TYPE_MASK 0x60
67#define RT5033_CHG_STAT_TYPE_PRE 0x20
68#define RT5033_CHG_STAT_TYPE_FAST 0x60
69
70/* RT5033 CHGCTRL1 register */
71#define RT5033_CHGCTRL1_IAICR_MASK 0xe0
72#define RT5033_CHGCTRL1_MODE_MASK 0x01
73
74/* RT5033 CHGCTRL2 register */
75#define RT5033_CHGCTRL2_CV_MASK 0xfc
76
77/* RT5033 CHGCTRL3 register */
78#define RT5033_CHGCTRL3_CFO_EN_MASK 0x40
79#define RT5033_CHGCTRL3_TIMER_MASK 0x38
80#define RT5033_CHGCTRL3_TIMER_EN_MASK 0x01
81
82/* RT5033 CHGCTRL4 register */
83#define RT5033_CHGCTRL4_EOC_MASK 0x07
84#define RT5033_CHGCTRL4_IPREC_MASK 0x18
85
86/* RT5033 CHGCTRL5 register */
87#define RT5033_CHGCTRL5_VPREC_MASK 0x0f
88#define RT5033_CHGCTRL5_ICHG_MASK 0xf0
89#define RT5033_CHGCTRL5_ICHG_SHIFT 0x04
90#define RT5033_CHG_MAX_CURRENT 0x0d
91
92/* RT5033 RT CTRL1 register */
93#define RT5033_RT_CTRL1_UUG_MASK 0x02
94#define RT5033_RT_HZ_MASK 0x01
95
96/* RT5033 control register */
97#define RT5033_CTRL_FCCM_BUCK_MASK 0x00
98#define RT5033_CTRL_BUCKOMS_MASK 0x01
99#define RT5033_CTRL_LDOOMS_MASK 0x02
100#define RT5033_CTRL_SLDOOMS_MASK 0x03
101#define RT5033_CTRL_EN_BUCK_MASK 0x04
102#define RT5033_CTRL_EN_LDO_MASK 0x05
103#define RT5033_CTRL_EN_SAFE_LDO_MASK 0x06
104#define RT5033_CTRL_LDO_SLEEP_MASK 0x07
105
106/* RT5033 BUCK control register */
107#define RT5033_BUCK_CTRL_MASK 0x1f
108
109/* RT5033 LDO control register */
110#define RT5033_LDO_CTRL_MASK 0x1f
111
112/* RT5033 charger property - model, manufacturer */
113
114#define RT5033_CHARGER_MODEL "RT5033WSC Charger"
115#define RT5033_MANUFACTURER "Richtek Technology Corporation"
116
117/*
118 * RT5033 charger fast-charge current limits (as in CHGCTRL1 register).
119 * AICR mode limits the input current; for example,
120 * the AICR 100 mode limits the input current to 100 mA.
121 */
122#define RT5033_AICR_100_MODE 0x20
123#define RT5033_AICR_500_MODE 0x40
124#define RT5033_AICR_700_MODE 0x60
125#define RT5033_AICR_900_MODE 0x80
126#define RT5033_AICR_1500_MODE 0xc0
127#define RT5033_AICR_2000_MODE 0xe0
128#define RT5033_AICR_MODE_MASK 0xe0
129
130/* RT5033 uses an internal fast-charge timer; one of the times below must be set */
131#define RT5033_FAST_CHARGE_TIMER4 0x00
132#define RT5033_FAST_CHARGE_TIMER6 0x01
133#define RT5033_FAST_CHARGE_TIMER8 0x02
134#define RT5033_FAST_CHARGE_TIMER9 0x03
135#define RT5033_FAST_CHARGE_TIMER12 0x04
136#define RT5033_FAST_CHARGE_TIMER14 0x05
137#define RT5033_FAST_CHARGE_TIMER16 0x06
138
139#define RT5033_INT_TIMER_ENABLE 0x01
140
141/* RT5033 charger termination enable mask */
142#define RT5033_TE_ENABLE_MASK 0x08
143
144/*
145 * RT5033 charger OPA mode. The RT5033 has two OPA modes: charger mode
146 * and boost mode for OTG.
147 */
148
149#define RT5033_CHARGER_MODE 0x00
150#define RT5033_BOOST_MODE 0x01
151
152/* RT5033 charger termination enable */
153#define RT5033_TE_ENABLE 0x08
154
155/* RT5033 charger CFO enable */
156#define RT5033_CFO_ENABLE 0x40
157
158/* RT5033 charger constant charge voltage (as in CHGCTRL2 register), uV */
159#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN 3650000U
160#define RT5033_CHARGER_CONST_VOLTAGE_STEP_NUM 25000U
161#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MAX 4400000U
162
163/* RT5033 charger pre-charge current limits (as in CHGCTRL4 register), uA */
164#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MIN 350000U
165#define RT5033_CHARGER_PRE_CURRENT_STEP_NUM 100000U
166#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MAX 650000U
167
168/* RT5033 charger fast-charge current (as in CHGCTRL5 register), uA */
169#define RT5033_CHARGER_FAST_CURRENT_MIN 700000U
170#define RT5033_CHARGER_FAST_CURRENT_STEP_NUM 100000U
171#define RT5033_CHARGER_FAST_CURRENT_MAX 2000000U
172
173/*
174 * RT5033 charger constant-charge end-of-charge current
175 * (as in CHGCTRL4 register), uA
176 */
177#define RT5033_CHARGER_EOC_MIN 150000U
178#define RT5033_CHARGER_EOC_REF 300000U
179#define RT5033_CHARGER_EOC_STEP_NUM1 50000U
180#define RT5033_CHARGER_EOC_STEP_NUM2 100000U
181#define RT5033_CHARGER_EOC_MAX 600000U
182
183/*
184 * RT5033 charger pre-charge threshold volt limits
185 * (as in CHGCTRL5 register), uV
186 */
187
188#define RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MIN 2300000U
189#define RT5033_CHARGER_PRE_THRESHOLD_STEP_NUM 100000U
190#define RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MAX 3800000U
191
192/*
193 * RT5033 charger UUG enable. If UUG is enabled, the MOSFET is
194 * automatically controlled by the H/W charger circuit.
195 */
196#define RT5033_CHARGER_UUG_ENABLE 0x02
197
198/* RT5033 charger high-impedance (HZ) mode */
199#define RT5033_CHARGER_HZ_DISABLE 0x00
200#define RT5033_CHARGER_HZ_ENABLE 0x01
201
202/* RT5033 regulator BUCK output voltage, in uV */
203#define RT5033_REGULATOR_BUCK_VOLTAGE_MIN 1000000U
204#define RT5033_REGULATOR_BUCK_VOLTAGE_MAX 3000000U
205#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP 100000U
206#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM 32
207
208/* RT5033 regulator LDO output voltage, in uV */
209#define RT5033_REGULATOR_LDO_VOLTAGE_MIN 1200000U
210#define RT5033_REGULATOR_LDO_VOLTAGE_MAX 3000000U
211#define RT5033_REGULATOR_LDO_VOLTAGE_STEP 100000U
212#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM 32
213
214/* RT5033 regulator SAFE LDO output voltage, in uV */
215#define RT5033_REGULATOR_SAFE_LDO_VOLTAGE 4900000U
216
217enum rt5033_fuel_reg {
218 RT5033_FUEL_REG_OCV_H = 0x00,
219 RT5033_FUEL_REG_OCV_L = 0x01,
220 RT5033_FUEL_REG_VBAT_H = 0x02,
221 RT5033_FUEL_REG_VBAT_L = 0x03,
222 RT5033_FUEL_REG_SOC_H = 0x04,
223 RT5033_FUEL_REG_SOC_L = 0x05,
224 RT5033_FUEL_REG_CTRL_H = 0x06,
225 RT5033_FUEL_REG_CTRL_L = 0x07,
226 RT5033_FUEL_REG_CRATE = 0x08,
227 RT5033_FUEL_REG_DEVICE_ID = 0x09,
228 RT5033_FUEL_REG_AVG_VOLT_H = 0x0A,
229 RT5033_FUEL_REG_AVG_VOLT_L = 0x0B,
230 RT5033_FUEL_REG_CONFIG_H = 0x0C,
231 RT5033_FUEL_REG_CONFIG_L = 0x0D,
232 /* Reserved 0x0E~0x0F */
233 RT5033_FUEL_REG_IRQ_CTRL = 0x10,
234 RT5033_FUEL_REG_IRQ_FLAG = 0x11,
235 RT5033_FUEL_VMIN = 0x12,
236 RT5033_FUEL_SMIN = 0x13,
237 /* Reserved 0x14~0x1F */
238 RT5033_FUEL_VGCOMP1 = 0x20,
239 RT5033_FUEL_VGCOMP2 = 0x21,
240 RT5033_FUEL_VGCOMP3 = 0x22,
241 RT5033_FUEL_VGCOMP4 = 0x23,
242 /* Reserved 0x24~0xFD */
243 RT5033_FUEL_MFA_H = 0xFE,
244 RT5033_FUEL_MFA_L = 0xFF,
245
246 RT5033_FUEL_REG_END,
247};
248
249/* RT5033 fuel gauge battery present property */
250#define RT5033_FUEL_BAT_PRESENT 0x02
251
252/* RT5033 PMIC interrupts */
253#define RT5033_PMIC_IRQ_BUCKOCP 2
254#define RT5033_PMIC_IRQ_BUCKLV 3
255#define RT5033_PMIC_IRQ_SAFELDOLV 4
256#define RT5033_PMIC_IRQ_LDOLV 5
257#define RT5033_PMIC_IRQ_OT 6
258#define RT5033_PMIC_IRQ_VDDA_UV 7
259
260#endif /* __RT5033_PRIVATE_H__ */
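
As a rough sketch of how the CHGCTRL5 macros above combine (illustrative only: the helper and the RT5033_REG_CHG_CTRL5 register index it writes are assumptions, not code from this series), a charger driver could map a microamp request onto the ICHG field like this:

#include <linux/errno.h>
#include <linux/regmap.h>
#include <linux/mfd/rt5033-private.h>

/* Map a fast-charge request in uA onto the CHGCTRL5 ICHG field. */
static int example_set_fast_charge(struct regmap *regmap, unsigned int uamp)
{
	unsigned int val;

	if (uamp < RT5033_CHARGER_FAST_CURRENT_MIN ||
	    uamp > RT5033_CHARGER_FAST_CURRENT_MAX)
		return -EINVAL;

	/* 700000 uA maps to 0; each 100000 uA step increments the field. */
	val = (uamp - RT5033_CHARGER_FAST_CURRENT_MIN) /
	      RT5033_CHARGER_FAST_CURRENT_STEP_NUM;

	return regmap_update_bits(regmap, RT5033_REG_CHG_CTRL5,
				  RT5033_CHGCTRL5_ICHG_MASK,
				  val << RT5033_CHGCTRL5_ICHG_SHIFT);
}

The top of the range works out to (2000000 - 700000) / 100000 = 13, which is exactly RT5033_CHG_MAX_CURRENT (0x0d).
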
diff --git a/include/linux/mfd/rt5033.h b/include/linux/mfd/rt5033.h
new file mode 100644
index 000000000000..010cff49a98e
--- /dev/null
+++ b/include/linux/mfd/rt5033.h
@@ -0,0 +1,62 @@
1/*
2 * MFD core driver for the RT5033
3 *
4 * Copyright (C) 2014 Samsung Electronics
5 * Author: Beomho Seo <beomho.seo@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef __RT5033_H__
13#define __RT5033_H__
14
15#include <linux/regulator/consumer.h>
16#include <linux/i2c.h>
17#include <linux/regmap.h>
18#include <linux/power_supply.h>
19
20/* RT5033 regulator IDs */
21enum rt5033_regulators {
22 RT5033_BUCK = 0,
23 RT5033_LDO,
24 RT5033_SAFE_LDO,
25
26 RT5033_REGULATOR_NUM,
27};
28
29struct rt5033_dev {
30 struct device *dev;
31
32 struct regmap *regmap;
33 struct regmap_irq_chip_data *irq_data;
34 int irq;
35 bool wakeup;
36};
37
38struct rt5033_battery {
39 struct i2c_client *client;
40 struct rt5033_dev *rt5033;
41 struct regmap *regmap;
42 struct power_supply psy;
43};
44
45/* RT5033 charger platform data */
46struct rt5033_charger_data {
47 unsigned int pre_uamp;
48 unsigned int pre_uvolt;
49 unsigned int const_uvolt;
50 unsigned int eoc_uamp;
51 unsigned int fast_uamp;
52};
53
54struct rt5033_charger {
55 struct device *dev;
56 struct rt5033_dev *rt5033;
57 struct power_supply psy;
58
59 struct rt5033_charger_data *chg;
60};
61
62#endif /* __RT5033_H__ */
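
For orientation only (the values are arbitrary picks inside the limits documented in rt5033-private.h, not from this patch), a board file might fill in the charger platform data like so:

#include <linux/mfd/rt5033.h>

static struct rt5033_charger_data example_charger_pdata = {
	.pre_uamp	= 350000,	/* pre-charge current, uA */
	.pre_uvolt	= 3100000,	/* pre-charge threshold, uV */
	.const_uvolt	= 4350000,	/* constant-charge voltage, uV */
	.eoc_uamp	= 300000,	/* end-of-charge current, uA */
	.fast_uamp	= 1000000,	/* fast-charge current, uA */
};
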
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 2bbc62aa818a..551f85456c11 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -427,7 +427,7 @@ struct mlx4_wqe_inline_seg {
427 427
428enum mlx4_update_qp_attr { 428enum mlx4_update_qp_attr {
429 MLX4_UPDATE_QP_SMAC = 1 << 0, 429 MLX4_UPDATE_QP_SMAC = 1 << 0,
430 MLX4_UPDATE_QP_VSD = 1 << 2, 430 MLX4_UPDATE_QP_VSD = 1 << 1,
431 MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1 431 MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1
432}; 432};
433 433
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 3301c4c289d6..f17fa75809aa 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -227,6 +227,7 @@ struct mtd_info {
227 int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs); 227 int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
228 int (*_suspend) (struct mtd_info *mtd); 228 int (*_suspend) (struct mtd_info *mtd);
229 void (*_resume) (struct mtd_info *mtd); 229 void (*_resume) (struct mtd_info *mtd);
230 void (*_reboot) (struct mtd_info *mtd);
230 /* 231 /*
231 * If the driver is something smart, like UBI, it may need to maintain 232 * If the driver is something smart, like UBI, it may need to maintain
232 * its own reference counting. The below functions are only for driver. 233 * its own reference counting. The below functions are only for driver.
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 63aeccf9ddc8..4720b86ee73d 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -56,6 +56,10 @@
56/* Used for Spansion flashes only. */ 56/* Used for Spansion flashes only. */
57#define SPINOR_OP_BRWR 0x17 /* Bank register write */ 57#define SPINOR_OP_BRWR 0x17 /* Bank register write */
58 58
59/* Used for Micron flashes only. */
60#define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */
61#define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */
62
59/* Status Register bits. */ 63/* Status Register bits. */
60#define SR_WIP 1 /* Write in progress */ 64#define SR_WIP 1 /* Write in progress */
61#define SR_WEL 2 /* Write enable latch */ 65#define SR_WEL 2 /* Write enable latch */
@@ -67,6 +71,9 @@
67 71
68#define SR_QUAD_EN_MX 0x40 /* Macronix Quad I/O */ 72#define SR_QUAD_EN_MX 0x40 /* Macronix Quad I/O */
69 73
74/* Enhanced Volatile Configuration Register bits */
75#define EVCR_QUAD_EN_MICRON 0x80 /* Micron Quad I/O */
76
70/* Flag Status Register bits */ 77/* Flag Status Register bits */
71#define FSR_READY 0x80 78#define FSR_READY 0x80
72 79
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 2007f3b44d05..625c8d71511b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2341,6 +2341,7 @@ struct gro_remcsum {
2341 2341
2342static inline void skb_gro_remcsum_init(struct gro_remcsum *grc) 2342static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
2343{ 2343{
2344 grc->offset = 0;
2344 grc->delta = 0; 2345 grc->delta = 0;
2345} 2346}
2346 2347
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 6d627b92df53..2f77e0c651c8 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -180,7 +180,6 @@ struct nfs_inode {
180 /* NFSv4 state */ 180 /* NFSv4 state */
181 struct list_head open_states; 181 struct list_head open_states;
182 struct nfs_delegation __rcu *delegation; 182 struct nfs_delegation __rcu *delegation;
183 fmode_t delegation_state;
184 struct rw_semaphore rwsem; 183 struct rw_semaphore rwsem;
185 184
186 /* pNFS layout information */ 185 /* pNFS layout information */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 38d96ba935c2..4cb3eaa89cf7 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1167,8 +1167,15 @@ struct nfs41_impl_id {
1167 struct nfstime4 date; 1167 struct nfstime4 date;
1168}; 1168};
1169 1169
1170struct nfs41_bind_conn_to_session_args {
1171 struct nfs_client *client;
1172 struct nfs4_sessionid sessionid;
1173 u32 dir;
1174 bool use_conn_in_rdma_mode;
1175};
1176
1170struct nfs41_bind_conn_to_session_res { 1177struct nfs41_bind_conn_to_session_res {
1171 struct nfs4_session *session; 1178 struct nfs4_sessionid sessionid;
1172 u32 dir; 1179 u32 dir;
1173 bool use_conn_in_rdma_mode; 1180 bool use_conn_in_rdma_mode;
1174}; 1181};
@@ -1185,6 +1192,8 @@ struct nfs41_exchange_id_res {
1185 1192
1186struct nfs41_create_session_args { 1193struct nfs41_create_session_args {
1187 struct nfs_client *client; 1194 struct nfs_client *client;
1195 u64 clientid;
1196 uint32_t seqid;
1188 uint32_t flags; 1197 uint32_t flags;
1189 uint32_t cb_program; 1198 uint32_t cb_program;
1190 struct nfs4_channel_attrs fc_attrs; /* Fore Channel */ 1199 struct nfs4_channel_attrs fc_attrs; /* Fore Channel */
@@ -1192,7 +1201,11 @@ struct nfs41_create_session_args {
1192}; 1201};
1193 1202
1194struct nfs41_create_session_res { 1203struct nfs41_create_session_res {
1195 struct nfs_client *client; 1204 struct nfs4_sessionid sessionid;
1205 uint32_t seqid;
1206 uint32_t flags;
1207 struct nfs4_channel_attrs fc_attrs; /* Fore Channel */
1208 struct nfs4_channel_attrs bc_attrs; /* Back Channel */
1196}; 1209};
1197 1210
1198struct nfs41_reclaim_complete_args { 1211struct nfs41_reclaim_complete_args {
@@ -1351,7 +1364,7 @@ struct nfs_commit_completion_ops {
1351}; 1364};
1352 1365
1353struct nfs_commit_info { 1366struct nfs_commit_info {
1354 spinlock_t *lock; 1367 spinlock_t *lock; /* inode->i_lock */
1355 struct nfs_mds_commit_info *mds; 1368 struct nfs_mds_commit_info *mds;
1356 struct pnfs_ds_commit_info *ds; 1369 struct pnfs_ds_commit_info *ds;
1357 struct nfs_direct_req *dreq; /* O_DIRECT request */ 1370 struct nfs_direct_req *dreq; /* O_DIRECT request */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 19a5d4b23209..0adad4a5419b 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -17,7 +17,6 @@
17 17
18#include <uapi/linux/nvme.h> 18#include <uapi/linux/nvme.h>
19#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/miscdevice.h>
21#include <linux/kref.h> 20#include <linux/kref.h>
22#include <linux/blk-mq.h> 21#include <linux/blk-mq.h>
23 22
@@ -62,8 +61,6 @@ enum {
62 NVME_CSTS_SHST_MASK = 3 << 2, 61 NVME_CSTS_SHST_MASK = 3 << 2,
63}; 62};
64 63
65#define NVME_VS(major, minor) (major << 16 | minor)
66
67extern unsigned char nvme_io_timeout; 64extern unsigned char nvme_io_timeout;
68#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ) 65#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)
69 66
@@ -91,9 +88,10 @@ struct nvme_dev {
91 struct nvme_bar __iomem *bar; 88 struct nvme_bar __iomem *bar;
92 struct list_head namespaces; 89 struct list_head namespaces;
93 struct kref kref; 90 struct kref kref;
94 struct miscdevice miscdev; 91 struct device *device;
95 work_func_t reset_workfn; 92 work_func_t reset_workfn;
96 struct work_struct reset_work; 93 struct work_struct reset_work;
94 struct work_struct probe_work;
97 char name[12]; 95 char name[12];
98 char serial[20]; 96 char serial[20];
99 char model[40]; 97 char model[40];
@@ -105,7 +103,6 @@ struct nvme_dev {
105 u16 abort_limit; 103 u16 abort_limit;
106 u8 event_limit; 104 u8 event_limit;
107 u8 vwc; 105 u8 vwc;
108 u8 initialized;
109}; 106};
110 107
111/* 108/*
@@ -121,6 +118,7 @@ struct nvme_ns {
121 unsigned ns_id; 118 unsigned ns_id;
122 int lba_shift; 119 int lba_shift;
123 int ms; 120 int ms;
121 int pi_type;
124 u64 mode_select_num_blocks; 122 u64 mode_select_num_blocks;
125 u32 mode_select_block_len; 123 u32 mode_select_block_len;
126}; 124};
@@ -138,6 +136,7 @@ struct nvme_iod {
138 int nents; /* Used in scatterlist */ 136 int nents; /* Used in scatterlist */
139 int length; /* Of data, in bytes */ 137 int length; /* Of data, in bytes */
140 dma_addr_t first_dma; 138 dma_addr_t first_dma;
139 struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */
141 struct scatterlist sg[0]; 140 struct scatterlist sg[0];
142}; 141};
143 142
diff --git a/arch/blackfin/include/asm/bfin_rotary.h b/include/linux/platform_data/bfin_rotary.h
index 8895a750c70c..98829370fee2 100644
--- a/arch/blackfin/include/asm/bfin_rotary.h
+++ b/include/linux/platform_data/bfin_rotary.h
@@ -40,6 +40,7 @@ struct bfin_rotary_platform_data {
40 unsigned short debounce; /* 0..17 */ 40 unsigned short debounce; /* 0..17 */
41 unsigned short mode; 41 unsigned short mode;
42 unsigned short pm_wakeup; 42 unsigned short pm_wakeup;
43 unsigned short *pin_list;
43}; 44};
44 45
45/* CNT_CONFIG bitmasks */ 46/* CNT_CONFIG bitmasks */
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index d8155c005242..87ac14c584f2 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -13,10 +13,12 @@
13 13
14#include <linux/device.h> 14#include <linux/device.h>
15 15
16#define DW_DMA_MAX_NR_MASTERS 4
17
16/** 18/**
17 * struct dw_dma_slave - Controller-specific information about a slave 19 * struct dw_dma_slave - Controller-specific information about a slave
18 * 20 *
19 * @dma_dev: required DMA master device. Depricated. 21 * @dma_dev: required DMA master device
20 * @src_id: src request line 22 * @src_id: src request line
21 * @dst_id: dst request line 23 * @dst_id: dst request line
22 * @src_master: src master for transfers on allocated channel. 24 * @src_master: src master for transfers on allocated channel.
@@ -53,7 +55,7 @@ struct dw_dma_platform_data {
53 unsigned char chan_priority; 55 unsigned char chan_priority;
54 unsigned short block_size; 56 unsigned short block_size;
55 unsigned char nr_masters; 57 unsigned char nr_masters;
56 unsigned char data_width[4]; 58 unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
57}; 59};
58 60
59#endif /* _PLATFORM_DATA_DMA_DW_H */ 61#endif /* _PLATFORM_DATA_DMA_DW_H */
diff --git a/include/linux/platform_data/dma-mmp_tdma.h b/include/linux/platform_data/dma-mmp_tdma.h
index 66574ea39f97..0c72886030ef 100644
--- a/include/linux/platform_data/dma-mmp_tdma.h
+++ b/include/linux/platform_data/dma-mmp_tdma.h
@@ -28,6 +28,13 @@ struct sram_platdata {
28 int granularity; 28 int granularity;
29}; 29};
30 30
31#ifdef CONFIG_ARM
31extern struct gen_pool *sram_get_gpool(char *pool_name); 32extern struct gen_pool *sram_get_gpool(char *pool_name);
33#else
34static inline struct gen_pool *sram_get_gpool(char *pool_name)
35{
36 return NULL;
37}
38#endif
32 39
33#endif /* __DMA_MMP_TDMA_H */ 40#endif /* __DMA_MMP_TDMA_H */
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 58851275fed9..d438eeb08bff 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -54,10 +54,11 @@ struct rhash_head {
54 * @buckets: size * hash buckets 54 * @buckets: size * hash buckets
55 */ 55 */
56struct bucket_table { 56struct bucket_table {
57 size_t size; 57 size_t size;
58 unsigned int locks_mask; 58 unsigned int locks_mask;
59 spinlock_t *locks; 59 spinlock_t *locks;
60 struct rhash_head __rcu *buckets[]; 60
61 struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
61}; 62};
62 63
63typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed); 64typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
@@ -78,12 +79,6 @@ struct rhashtable;
78 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128) 79 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
79 * @hashfn: Function to hash key 80 * @hashfn: Function to hash key
80 * @obj_hashfn: Function to hash object 81 * @obj_hashfn: Function to hash object
81 * @grow_decision: If defined, may return true if table should expand
82 * @shrink_decision: If defined, may return true if table should shrink
83 *
84 * Note: when implementing the grow and shrink decision function, min/max
85 * shift must be enforced, otherwise, resizing watermarks they set may be
86 * useless.
87 */ 82 */
88struct rhashtable_params { 83struct rhashtable_params {
89 size_t nelem_hint; 84 size_t nelem_hint;
@@ -97,10 +92,6 @@ struct rhashtable_params {
97 size_t locks_mul; 92 size_t locks_mul;
98 rht_hashfn_t hashfn; 93 rht_hashfn_t hashfn;
99 rht_obj_hashfn_t obj_hashfn; 94 rht_obj_hashfn_t obj_hashfn;
100 bool (*grow_decision)(const struct rhashtable *ht,
101 size_t new_size);
102 bool (*shrink_decision)(const struct rhashtable *ht,
103 size_t new_size);
104}; 95};
105 96
106/** 97/**
@@ -192,9 +183,6 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
192void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); 183void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
193bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); 184bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
194 185
195bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size);
196bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
197
198int rhashtable_expand(struct rhashtable *ht); 186int rhashtable_expand(struct rhashtable *ht);
199int rhashtable_shrink(struct rhashtable *ht); 187int rhashtable_shrink(struct rhashtable *ht);
200 188
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 41c60e5302d7..6d77432e14ff 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -363,9 +363,6 @@ extern void show_regs(struct pt_regs *);
363 */ 363 */
364extern void show_stack(struct task_struct *task, unsigned long *sp); 364extern void show_stack(struct task_struct *task, unsigned long *sp);
365 365
366void io_schedule(void);
367long io_schedule_timeout(long timeout);
368
369extern void cpu_init (void); 366extern void cpu_init (void);
370extern void trap_init(void); 367extern void trap_init(void);
371extern void update_process_times(int user); 368extern void update_process_times(int user);
@@ -422,6 +419,13 @@ extern signed long schedule_timeout_uninterruptible(signed long timeout);
422asmlinkage void schedule(void); 419asmlinkage void schedule(void);
423extern void schedule_preempt_disabled(void); 420extern void schedule_preempt_disabled(void);
424 421
422extern long io_schedule_timeout(long timeout);
423
424static inline void io_schedule(void)
425{
426 io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
427}
428
425struct nsproxy; 429struct nsproxy;
426struct user_namespace; 430struct user_namespace;
427 431
diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h
index 7e61a17030a4..694eecb2f1b5 100644
--- a/include/linux/sunrpc/metrics.h
+++ b/include/linux/sunrpc/metrics.h
@@ -89,8 +89,11 @@ void rpc_free_iostats(struct rpc_iostats *);
89static inline struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) { return NULL; } 89static inline struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) { return NULL; }
90static inline void rpc_count_iostats(const struct rpc_task *task, 90static inline void rpc_count_iostats(const struct rpc_task *task,
91 struct rpc_iostats *stats) {} 91 struct rpc_iostats *stats) {}
92static inline void rpc_count_iostats_metrics(const struct rpc_task *, 92static inline void rpc_count_iostats_metrics(const struct rpc_task *task,
93 struct rpc_iostats *) {} 93 struct rpc_iostats *stats)
94{
95}
96
94static inline void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) {} 97static inline void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) {}
95static inline void rpc_free_iostats(struct rpc_iostats *stats) {} 98static inline void rpc_free_iostats(struct rpc_iostats *stats) {}
96 99
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index fc52e307efab..5eac316490ea 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -314,6 +314,8 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
314} 314}
315 315
316#endif 316#endif
317
318#if IS_ENABLED(CONFIG_THERMAL)
317struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, 319struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
318 void *, struct thermal_zone_device_ops *, 320 void *, struct thermal_zone_device_ops *,
319 const struct thermal_zone_params *, int, int); 321 const struct thermal_zone_params *, int, int);
@@ -340,8 +342,58 @@ struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
340 struct thermal_cooling_device *, int); 342 struct thermal_cooling_device *, int);
341void thermal_cdev_update(struct thermal_cooling_device *); 343void thermal_cdev_update(struct thermal_cooling_device *);
342void thermal_notify_framework(struct thermal_zone_device *, int); 344void thermal_notify_framework(struct thermal_zone_device *, int);
343 345#else
344#ifdef CONFIG_NET 346static inline struct thermal_zone_device *thermal_zone_device_register(
347 const char *type, int trips, int mask, void *devdata,
348 struct thermal_zone_device_ops *ops,
349 const struct thermal_zone_params *tzp,
350 int passive_delay, int polling_delay)
351{ return ERR_PTR(-ENODEV); }
352static inline void thermal_zone_device_unregister(
353 struct thermal_zone_device *tz)
354{ }
355static inline int thermal_zone_bind_cooling_device(
356 struct thermal_zone_device *tz, int trip,
357 struct thermal_cooling_device *cdev,
358 unsigned long upper, unsigned long lower)
359{ return -ENODEV; }
360static inline int thermal_zone_unbind_cooling_device(
361 struct thermal_zone_device *tz, int trip,
362 struct thermal_cooling_device *cdev)
363{ return -ENODEV; }
364static inline void thermal_zone_device_update(struct thermal_zone_device *tz)
365{ }
366static inline struct thermal_cooling_device *
367thermal_cooling_device_register(char *type, void *devdata,
368 const struct thermal_cooling_device_ops *ops)
369{ return ERR_PTR(-ENODEV); }
370static inline struct thermal_cooling_device *
371thermal_of_cooling_device_register(struct device_node *np,
372 char *type, void *devdata, const struct thermal_cooling_device_ops *ops)
373{ return ERR_PTR(-ENODEV); }
374static inline void thermal_cooling_device_unregister(
375 struct thermal_cooling_device *cdev)
376{ }
377static inline struct thermal_zone_device *thermal_zone_get_zone_by_name(
378 const char *name)
379{ return ERR_PTR(-ENODEV); }
380static inline int thermal_zone_get_temp(
381 struct thermal_zone_device *tz, unsigned long *temp)
382{ return -ENODEV; }
383static inline int get_tz_trend(struct thermal_zone_device *tz, int trip)
384{ return -ENODEV; }
385static inline struct thermal_instance *
386get_thermal_instance(struct thermal_zone_device *tz,
387 struct thermal_cooling_device *cdev, int trip)
388{ return ERR_PTR(-ENODEV); }
389static inline void thermal_cdev_update(struct thermal_cooling_device *cdev)
390{ }
391static inline void thermal_notify_framework(struct thermal_zone_device *tz,
392 int trip)
393{ }
394#endif /* CONFIG_THERMAL */
395
396#if defined(CONFIG_NET) && IS_ENABLED(CONFIG_THERMAL)
345extern int thermal_generate_netlink_event(struct thermal_zone_device *tz, 397extern int thermal_generate_netlink_event(struct thermal_zone_device *tz,
346 enum events event); 398 enum events event);
347#else 399#else
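
The practical effect of the new !CONFIG_THERMAL stubs is that callers no longer need #ifdef guards around the thermal API; a minimal sketch (the function and names are invented):

#include <linux/err.h>
#include <linux/thermal.h>

/* With CONFIG_THERMAL disabled, registration returns ERR_PTR(-ENODEV),
 * so the caller degrades gracefully instead of failing to build. */
static int example_register_tz(void *devdata,
			       struct thermal_zone_device_ops *ops)
{
	struct thermal_zone_device *tz;

	tz = thermal_zone_device_register("example", 0, 0, devdata,
					  ops, NULL, 0, 0);
	if (IS_ERR(tz))
		return PTR_ERR(tz);

	return 0;
}
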
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index d3204115f15d..2d67b8998fd8 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -26,6 +26,7 @@
26 * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_* 26 * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_*
27 * operations documented below 27 * operations documented below
28 * @mmap: Perform mmap(2) on a region of the device file descriptor 28 * @mmap: Perform mmap(2) on a region of the device file descriptor
29 * @request: Request for the bus driver to release the device
29 */ 30 */
30struct vfio_device_ops { 31struct vfio_device_ops {
31 char *name; 32 char *name;
@@ -38,6 +39,7 @@ struct vfio_device_ops {
38 long (*ioctl)(void *device_data, unsigned int cmd, 39 long (*ioctl)(void *device_data, unsigned int cmd,
39 unsigned long arg); 40 unsigned long arg);
40 int (*mmap)(void *device_data, struct vm_area_struct *vma); 41 int (*mmap)(void *device_data, struct vm_area_struct *vma);
42 void (*request)(void *device_data, unsigned int count);
41}; 43};
42 44
43extern int vfio_add_group_dev(struct device *dev, 45extern int vfio_add_group_dev(struct device *dev,
diff --git a/include/linux/virtio_mmio.h b/include/linux/virtio_mmio.h
index 5c7b6f0daef8..c4b09689ab64 100644
--- a/include/linux/virtio_mmio.h
+++ b/include/linux/virtio_mmio.h
@@ -51,23 +51,29 @@
51/* Virtio vendor ID - Read Only */ 51/* Virtio vendor ID - Read Only */
52#define VIRTIO_MMIO_VENDOR_ID 0x00c 52#define VIRTIO_MMIO_VENDOR_ID 0x00c
53 53
54/* Bitmask of the features supported by the host 54/* Bitmask of the features supported by the device (host)
55 * (32 bits per set) - Read Only */ 55 * (32 bits per set) - Read Only */
56#define VIRTIO_MMIO_HOST_FEATURES 0x010 56#define VIRTIO_MMIO_DEVICE_FEATURES 0x010
57 57
58/* Host features set selector - Write Only */ 58/* Device (host) features set selector - Write Only */
59#define VIRTIO_MMIO_HOST_FEATURES_SEL 0x014 59#define VIRTIO_MMIO_DEVICE_FEATURES_SEL 0x014
60 60
61/* Bitmask of features activated by the guest 61/* Bitmask of features activated by the driver (guest)
62 * (32 bits per set) - Write Only */ 62 * (32 bits per set) - Write Only */
63#define VIRTIO_MMIO_GUEST_FEATURES 0x020 63#define VIRTIO_MMIO_DRIVER_FEATURES 0x020
64 64
65/* Activated features set selector - Write Only */ 65/* Activated features set selector - Write Only */
66#define VIRTIO_MMIO_GUEST_FEATURES_SEL 0x024 66#define VIRTIO_MMIO_DRIVER_FEATURES_SEL 0x024
67
68
69#ifndef VIRTIO_MMIO_NO_LEGACY /* LEGACY DEVICES ONLY! */
67 70
68/* Guest's memory page size in bytes - Write Only */ 71/* Guest's memory page size in bytes - Write Only */
69#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028 72#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028
70 73
74#endif
75
76
71/* Queue selector - Write Only */ 77/* Queue selector - Write Only */
72#define VIRTIO_MMIO_QUEUE_SEL 0x030 78#define VIRTIO_MMIO_QUEUE_SEL 0x030
73 79
@@ -77,12 +83,21 @@
77/* Queue size for the currently selected queue - Write Only */ 83/* Queue size for the currently selected queue - Write Only */
78#define VIRTIO_MMIO_QUEUE_NUM 0x038 84#define VIRTIO_MMIO_QUEUE_NUM 0x038
79 85
86
87#ifndef VIRTIO_MMIO_NO_LEGACY /* LEGACY DEVICES ONLY! */
88
80/* Used Ring alignment for the currently selected queue - Write Only */ 89/* Used Ring alignment for the currently selected queue - Write Only */
81#define VIRTIO_MMIO_QUEUE_ALIGN 0x03c 90#define VIRTIO_MMIO_QUEUE_ALIGN 0x03c
82 91
83/* Guest's PFN for the currently selected queue - Read Write */ 92/* Guest's PFN for the currently selected queue - Read Write */
84#define VIRTIO_MMIO_QUEUE_PFN 0x040 93#define VIRTIO_MMIO_QUEUE_PFN 0x040
85 94
95#endif
96
97
98/* Ready bit for the currently selected queue - Read Write */
99#define VIRTIO_MMIO_QUEUE_READY 0x044
100
86/* Queue notifier - Write Only */ 101/* Queue notifier - Write Only */
87#define VIRTIO_MMIO_QUEUE_NOTIFY 0x050 102#define VIRTIO_MMIO_QUEUE_NOTIFY 0x050
88 103
@@ -95,6 +110,21 @@
95/* Device status register - Read Write */ 110/* Device status register - Read Write */
96#define VIRTIO_MMIO_STATUS 0x070 111#define VIRTIO_MMIO_STATUS 0x070
97 112
113/* Selected queue's Descriptor Table address, 64 bits in two halves */
114#define VIRTIO_MMIO_QUEUE_DESC_LOW 0x080
115#define VIRTIO_MMIO_QUEUE_DESC_HIGH 0x084
116
117/* Selected queue's Available Ring address, 64 bits in two halves */
118#define VIRTIO_MMIO_QUEUE_AVAIL_LOW 0x090
119#define VIRTIO_MMIO_QUEUE_AVAIL_HIGH 0x094
120
121/* Selected queue's Used Ring address, 64 bits in two halves */
122#define VIRTIO_MMIO_QUEUE_USED_LOW 0x0a0
123#define VIRTIO_MMIO_QUEUE_USED_HIGH 0x0a4
124
125/* Configuration atomicity value */
126#define VIRTIO_MMIO_CONFIG_GENERATION 0x0fc
127
98/* The config space is defined by each driver as 128/* The config space is defined by each driver as
99 * the per-driver configuration space - Read Write */ 129 * the per-driver configuration space - Read Write */
100#define VIRTIO_MMIO_CONFIG 0x100 130#define VIRTIO_MMIO_CONFIG 0x100
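
To make the new register layout concrete, here is a sketch of reading the 64-bit device feature bitmap through the renamed selector/value pair (assumes 'base' is the ioremap()ed register window; not code from the patch):

#include <linux/io.h>
#include <linux/virtio_mmio.h>

static u64 example_read_device_features(void __iomem *base)
{
	u64 features;

	/* Select feature word 1 and read the high half... */
	writel(1, base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features = (u64)readl(base + VIRTIO_MMIO_DEVICE_FEATURES) << 32;

	/* ...then word 0 for the low half. */
	writel(0, base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features |= readl(base + VIRTIO_MMIO_DEVICE_FEATURES);

	return features;
}

Queue addresses follow the same split pattern: the descriptor table, available ring and used ring are each programmed as QUEUE_*_LOW/QUEUE_*_HIGH 32-bit halves.
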
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h
index 1c1ad46250d5..fe328c52c46b 100644
--- a/include/net/caif/cfpkt.h
+++ b/include/net/caif/cfpkt.h
@@ -171,7 +171,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos);
171 * @return Checksum of buffer. 171 * @return Checksum of buffer.
172 */ 172 */
173 173
174u16 cfpkt_iterate(struct cfpkt *pkt, 174int cfpkt_iterate(struct cfpkt *pkt,
175 u16 (*iter_func)(u16 chks, void *buf, u16 len), 175 u16 (*iter_func)(u16 chks, void *buf, u16 len),
176 u16 data); 176 u16 data);
177 177
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index cbcff38ac9b7..d3583d3ee193 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -880,4 +880,18 @@ struct iscsit_global {
880 struct iscsi_portal_group *discovery_tpg; 880 struct iscsi_portal_group *discovery_tpg;
881}; 881};
882 882
883static inline u32 session_get_next_ttt(struct iscsi_session *session)
884{
885 u32 ttt;
886
887 spin_lock_bh(&session->ttt_lock);
888 ttt = session->targ_xfer_tag++;
889 if (ttt == 0xFFFFFFFF)
890 ttt = session->targ_xfer_tag++;
891 spin_unlock_bh(&session->ttt_lock);
892
893 return ttt;
894}
895
896extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
883#endif /* ISCSI_TARGET_CORE_H */ 897#endif /* ISCSI_TARGET_CORE_H */
diff --git a/drivers/target/iscsi/iscsi_target_stat.h b/include/target/iscsi/iscsi_target_stat.h
index 3ff76b4faad3..3ff76b4faad3 100644
--- a/drivers/target/iscsi/iscsi_target_stat.h
+++ b/include/target/iscsi/iscsi_target_stat.h
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index daef9daa500c..e6bb166f12c2 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -1,6 +1,6 @@
1#include <linux/module.h> 1#include <linux/module.h>
2#include <linux/list.h> 2#include <linux/list.h>
3#include "../../../drivers/target/iscsi/iscsi_target_core.h" 3#include "iscsi_target_core.h"
4 4
5struct iscsit_transport { 5struct iscsit_transport {
6#define ISCSIT_TRANSPORT_NAME 16 6#define ISCSIT_TRANSPORT_NAME 16
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 4a8795a87b9e..672150b6aaf5 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -407,7 +407,7 @@ struct t10_reservation {
407 /* Activate Persistence across Target Power Loss enabled 407 /* Activate Persistence across Target Power Loss enabled
408 * for SCSI device */ 408 * for SCSI device */
409 int pr_aptpl_active; 409 int pr_aptpl_active;
410#define PR_APTPL_BUF_LEN 8192 410#define PR_APTPL_BUF_LEN 262144
411 u32 pr_generation; 411 u32 pr_generation;
412 spinlock_t registration_lock; 412 spinlock_t registration_lock;
413 spinlock_t aptpl_reg_lock; 413 spinlock_t aptpl_reg_lock;
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 611e1c5893b4..b6dec05c7196 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -495,8 +495,7 @@ struct btrfs_ioctl_send_args {
495 495
496/* Error codes as returned by the kernel */ 496/* Error codes as returned by the kernel */
497enum btrfs_err_code { 497enum btrfs_err_code {
498 notused, 498 BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET = 1,
499 BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
500 BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET, 499 BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
501 BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET, 500 BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
502 BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET, 501 BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index 26386cf3db44..aef9a81b2d75 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -115,7 +115,13 @@ struct nvme_id_ns {
115 __le16 nawun; 115 __le16 nawun;
116 __le16 nawupf; 116 __le16 nawupf;
117 __le16 nacwu; 117 __le16 nacwu;
118 __u8 rsvd40[80]; 118 __le16 nabsn;
119 __le16 nabo;
120 __le16 nabspf;
121 __u16 rsvd46;
122 __le64 nvmcap[2];
123 __u8 rsvd64[40];
124 __u8 nguid[16];
119 __u8 eui64[8]; 125 __u8 eui64[8];
120 struct nvme_lbaf lbaf[16]; 126 struct nvme_lbaf lbaf[16];
121 __u8 rsvd192[192]; 127 __u8 rsvd192[192];
@@ -124,10 +130,22 @@ struct nvme_id_ns {
124 130
125enum { 131enum {
126 NVME_NS_FEAT_THIN = 1 << 0, 132 NVME_NS_FEAT_THIN = 1 << 0,
133 NVME_NS_FLBAS_LBA_MASK = 0xf,
134 NVME_NS_FLBAS_META_EXT = 0x10,
127 NVME_LBAF_RP_BEST = 0, 135 NVME_LBAF_RP_BEST = 0,
128 NVME_LBAF_RP_BETTER = 1, 136 NVME_LBAF_RP_BETTER = 1,
129 NVME_LBAF_RP_GOOD = 2, 137 NVME_LBAF_RP_GOOD = 2,
130 NVME_LBAF_RP_DEGRADED = 3, 138 NVME_LBAF_RP_DEGRADED = 3,
139 NVME_NS_DPC_PI_LAST = 1 << 4,
140 NVME_NS_DPC_PI_FIRST = 1 << 3,
141 NVME_NS_DPC_PI_TYPE3 = 1 << 2,
142 NVME_NS_DPC_PI_TYPE2 = 1 << 1,
143 NVME_NS_DPC_PI_TYPE1 = 1 << 0,
144 NVME_NS_DPS_PI_FIRST = 1 << 3,
145 NVME_NS_DPS_PI_MASK = 0x7,
146 NVME_NS_DPS_PI_TYPE1 = 1,
147 NVME_NS_DPS_PI_TYPE2 = 2,
148 NVME_NS_DPS_PI_TYPE3 = 3,
131}; 149};
132 150
133struct nvme_smart_log { 151struct nvme_smart_log {
@@ -261,6 +279,10 @@ enum {
261 NVME_RW_DSM_LATENCY_LOW = 3 << 4, 279 NVME_RW_DSM_LATENCY_LOW = 3 << 4,
262 NVME_RW_DSM_SEQ_REQ = 1 << 6, 280 NVME_RW_DSM_SEQ_REQ = 1 << 6,
263 NVME_RW_DSM_COMPRESSED = 1 << 7, 281 NVME_RW_DSM_COMPRESSED = 1 << 7,
282 NVME_RW_PRINFO_PRCHK_REF = 1 << 10,
283 NVME_RW_PRINFO_PRCHK_APP = 1 << 11,
284 NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12,
285 NVME_RW_PRINFO_PRACT = 1 << 13,
264}; 286};
265 287
266struct nvme_dsm_cmd { 288struct nvme_dsm_cmd {
@@ -549,6 +571,8 @@ struct nvme_passthru_cmd {
549 __u32 result; 571 __u32 result;
550}; 572};
551 573
574#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8))
575
552#define nvme_admin_cmd nvme_passthru_cmd 576#define nvme_admin_cmd nvme_passthru_cmd
553 577
554#define NVME_IOCTL_ID _IO('N', 0x40) 578#define NVME_IOCTL_ID _IO('N', 0x40)
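
As an illustrative decode (not from the patch; 'dps' stands for the Data Protection Settings byte of the Identify Namespace structure, a field outside this hunk), the new constants separate the T10 protection type from its placement in the metadata:

#include <linux/types.h>
#include <uapi/linux/nvme.h>

static void example_decode_pi(__u8 dps, int *pi_type, bool *pi_first)
{
	*pi_type  = dps & NVME_NS_DPS_PI_MASK;		/* 0 = none, 1..3 = Type 1..3 */
	*pi_first = !!(dps & NVME_NS_DPS_PI_FIRST);	/* PI leads the metadata */
}
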
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 89f63503f903..31891d9535e2 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -185,4 +185,9 @@ struct prctl_mm_map {
185#define PR_MPX_ENABLE_MANAGEMENT 43 185#define PR_MPX_ENABLE_MANAGEMENT 43
186#define PR_MPX_DISABLE_MANAGEMENT 44 186#define PR_MPX_DISABLE_MANAGEMENT 44
187 187
188#define PR_SET_FP_MODE 45
189#define PR_GET_FP_MODE 46
190# define PR_FP_MODE_FR (1 << 0) /* 64b FP registers */
191# define PR_FP_MODE_FRE (1 << 1) /* 32b compatibility */
192
188#endif /* _LINUX_PRCTL_H */ 193#endif /* _LINUX_PRCTL_H */
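
A minimal userspace sketch of the new pair (assumes a kernel that implements PR_SET_FP_MODE; the fallback defines only cover older uapi headers):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_FP_MODE
#define PR_SET_FP_MODE	45
#define PR_GET_FP_MODE	46
#define PR_FP_MODE_FR	(1 << 0)	/* 64b FP registers */
#endif

int main(void)
{
	/* Request 64-bit FP registers, then read back the effective mode. */
	if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR, 0, 0, 0) == -1)
		perror("PR_SET_FP_MODE");

	printf("fp mode: %d\n", prctl(PR_GET_FP_MODE, 0, 0, 0, 0));
	return 0;
}
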
diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild
index 19d5219b0b99..242cf0c6e33d 100644
--- a/include/uapi/linux/tc_act/Kbuild
+++ b/include/uapi/linux/tc_act/Kbuild
@@ -9,3 +9,4 @@ header-y += tc_pedit.h
9header-y += tc_skbedit.h 9header-y += tc_skbedit.h
10header-y += tc_vlan.h 10header-y += tc_vlan.h
11header-y += tc_bpf.h 11header-y += tc_bpf.h
12header-y += tc_connmark.h
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 29715d27548f..82889c30f4f5 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -333,6 +333,7 @@ enum {
333 VFIO_PCI_MSI_IRQ_INDEX, 333 VFIO_PCI_MSI_IRQ_INDEX,
334 VFIO_PCI_MSIX_IRQ_INDEX, 334 VFIO_PCI_MSIX_IRQ_INDEX,
335 VFIO_PCI_ERR_IRQ_INDEX, 335 VFIO_PCI_ERR_IRQ_INDEX,
336 VFIO_PCI_REQ_IRQ_INDEX,
336 VFIO_PCI_NUM_IRQS 337 VFIO_PCI_NUM_IRQS
337}; 338};
338 339
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index be40f7059e93..4b0488f20b2e 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -36,8 +36,7 @@
36/* Size of a PFN in the balloon interface. */ 36/* Size of a PFN in the balloon interface. */
37#define VIRTIO_BALLOON_PFN_SHIFT 12 37#define VIRTIO_BALLOON_PFN_SHIFT 12
38 38
39struct virtio_balloon_config 39struct virtio_balloon_config {
40{
41 /* Number of pages host wants Guest to give up. */ 40 /* Number of pages host wants Guest to give up. */
42 __le32 num_pages; 41 __le32 num_pages;
43 /* Number of pages we've actually got in balloon. */ 42 /* Number of pages we've actually got in balloon. */
diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h
index 247c8ba8544a..3c53eec4ae22 100644
--- a/include/uapi/linux/virtio_blk.h
+++ b/include/uapi/linux/virtio_blk.h
@@ -31,22 +31,25 @@
31#include <linux/virtio_types.h> 31#include <linux/virtio_types.h>
32 32
33/* Feature bits */ 33/* Feature bits */
34#define VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? */
35#define VIRTIO_BLK_F_SIZE_MAX 1 /* Indicates maximum segment size */ 34#define VIRTIO_BLK_F_SIZE_MAX 1 /* Indicates maximum segment size */
36#define VIRTIO_BLK_F_SEG_MAX 2 /* Indicates maximum # of segments */ 35#define VIRTIO_BLK_F_SEG_MAX 2 /* Indicates maximum # of segments */
37#define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */ 36#define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */
38#define VIRTIO_BLK_F_RO 5 /* Disk is read-only */ 37#define VIRTIO_BLK_F_RO 5 /* Disk is read-only */
39#define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/ 38#define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/
40#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */
41#define VIRTIO_BLK_F_WCE 9 /* Writeback mode enabled after reset */
42#define VIRTIO_BLK_F_TOPOLOGY 10 /* Topology information is available */ 39#define VIRTIO_BLK_F_TOPOLOGY 10 /* Topology information is available */
43#define VIRTIO_BLK_F_CONFIG_WCE 11 /* Writeback mode available in config */
44#define VIRTIO_BLK_F_MQ 12 /* support more than one vq */ 40#define VIRTIO_BLK_F_MQ 12 /* support more than one vq */
45 41
42/* Legacy feature bits */
43#ifndef VIRTIO_BLK_NO_LEGACY
44#define VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? */
45#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */
46#define VIRTIO_BLK_F_WCE 9 /* Writeback mode enabled after reset */
47#define VIRTIO_BLK_F_CONFIG_WCE 11 /* Writeback mode available in config */
46#ifndef __KERNEL__ 48#ifndef __KERNEL__
47/* Old (deprecated) name for VIRTIO_BLK_F_WCE. */ 49/* Old (deprecated) name for VIRTIO_BLK_F_WCE. */
48#define VIRTIO_BLK_F_FLUSH VIRTIO_BLK_F_WCE 50#define VIRTIO_BLK_F_FLUSH VIRTIO_BLK_F_WCE
49#endif 51#endif
52#endif /* !VIRTIO_BLK_NO_LEGACY */
50 53
51#define VIRTIO_BLK_ID_BYTES 20 /* ID string length */ 54#define VIRTIO_BLK_ID_BYTES 20 /* ID string length */
52 55
@@ -100,8 +103,10 @@ struct virtio_blk_config {
100#define VIRTIO_BLK_T_IN 0 103#define VIRTIO_BLK_T_IN 0
101#define VIRTIO_BLK_T_OUT 1 104#define VIRTIO_BLK_T_OUT 1
102 105
106#ifndef VIRTIO_BLK_NO_LEGACY
103/* This bit says it's a scsi command, not an actual read or write. */ 107/* This bit says it's a scsi command, not an actual read or write. */
104#define VIRTIO_BLK_T_SCSI_CMD 2 108#define VIRTIO_BLK_T_SCSI_CMD 2
109#endif /* VIRTIO_BLK_NO_LEGACY */
105 110
106/* Cache flush command */ 111/* Cache flush command */
107#define VIRTIO_BLK_T_FLUSH 4 112#define VIRTIO_BLK_T_FLUSH 4
@@ -109,8 +114,10 @@ struct virtio_blk_config {
109/* Get device ID command */ 114/* Get device ID command */
110#define VIRTIO_BLK_T_GET_ID 8 115#define VIRTIO_BLK_T_GET_ID 8
111 116
117#ifndef VIRTIO_BLK_NO_LEGACY
112/* Barrier before this op. */ 118/* Barrier before this op. */
113#define VIRTIO_BLK_T_BARRIER 0x80000000 119#define VIRTIO_BLK_T_BARRIER 0x80000000
120#endif /* !VIRTIO_BLK_NO_LEGACY */
114 121
115/* This is the first element of the read scatter-gather list. */ 122/* This is the first element of the read scatter-gather list. */
116struct virtio_blk_outhdr { 123struct virtio_blk_outhdr {
@@ -122,12 +129,14 @@ struct virtio_blk_outhdr {
122 __virtio64 sector; 129 __virtio64 sector;
123}; 130};
124 131
132#ifndef VIRTIO_BLK_NO_LEGACY
125struct virtio_scsi_inhdr { 133struct virtio_scsi_inhdr {
126 __virtio32 errors; 134 __virtio32 errors;
127 __virtio32 data_len; 135 __virtio32 data_len;
128 __virtio32 sense_len; 136 __virtio32 sense_len;
129 __virtio32 residual; 137 __virtio32 residual;
130}; 138};
139#endif /* !VIRTIO_BLK_NO_LEGACY */
131 140
132/* And this is the final byte of the write scatter-gather list. */ 141/* And this is the final byte of the write scatter-gather list. */
133#define VIRTIO_BLK_S_OK 0 142#define VIRTIO_BLK_S_OK 0
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index a6d0cdeaacd4..c18264df9504 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -49,12 +49,14 @@
49#define VIRTIO_TRANSPORT_F_START 28 49#define VIRTIO_TRANSPORT_F_START 28
50#define VIRTIO_TRANSPORT_F_END 33 50#define VIRTIO_TRANSPORT_F_END 33
51 51
52#ifndef VIRTIO_CONFIG_NO_LEGACY
52/* Do we get callbacks when the ring is completely used, even if we've 53/* Do we get callbacks when the ring is completely used, even if we've
53 * suppressed them? */ 54 * suppressed them? */
54#define VIRTIO_F_NOTIFY_ON_EMPTY 24 55#define VIRTIO_F_NOTIFY_ON_EMPTY 24
55 56
56/* Can the device handle any descriptor layout? */ 57/* Can the device handle any descriptor layout? */
57#define VIRTIO_F_ANY_LAYOUT 27 58#define VIRTIO_F_ANY_LAYOUT 27
59#endif /* VIRTIO_CONFIG_NO_LEGACY */
58 60
59/* v1.0 compliant. */ 61/* v1.0 compliant. */
60#define VIRTIO_F_VERSION_1 32 62#define VIRTIO_F_VERSION_1 32
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index b5f1677b291c..7bbee79ca293 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -35,7 +35,6 @@
35#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ 35#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
36#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ 36#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
37#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */ 37#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
38#define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */
39#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */ 38#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
40#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */ 39#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
41#define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */ 40#define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */
@@ -56,6 +55,10 @@
56 * Steering */ 55 * Steering */
57#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */ 56#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
58 57
58#ifndef VIRTIO_NET_NO_LEGACY
59#define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */
60#endif /* VIRTIO_NET_NO_LEGACY */
61
59#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */ 62#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
60#define VIRTIO_NET_S_ANNOUNCE 2 /* Announcement is needed */ 63#define VIRTIO_NET_S_ANNOUNCE 2 /* Announcement is needed */
61 64
@@ -71,19 +74,39 @@ struct virtio_net_config {
71 __u16 max_virtqueue_pairs; 74 __u16 max_virtqueue_pairs;
72} __attribute__((packed)); 75} __attribute__((packed));
73 76
77/*
78 * This header comes first in the scatter-gather list. If you don't
79 * specify GSO or CSUM features, you can simply ignore the header.
80 *
81 * This is bitwise-equivalent to the legacy struct virtio_net_hdr_mrg_rxbuf,
82 * only flattened.
83 */
84struct virtio_net_hdr_v1 {
85#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* Use csum_start, csum_offset */
86#define VIRTIO_NET_HDR_F_DATA_VALID 2 /* Csum is valid */
87 __u8 flags;
88#define VIRTIO_NET_HDR_GSO_NONE 0 /* Not a GSO frame */
89#define VIRTIO_NET_HDR_GSO_TCPV4 1 /* GSO frame, IPv4 TCP (TSO) */
90#define VIRTIO_NET_HDR_GSO_UDP 3 /* GSO frame, IPv4 UDP (UFO) */
91#define VIRTIO_NET_HDR_GSO_TCPV6 4 /* GSO frame, IPv6 TCP */
92#define VIRTIO_NET_HDR_GSO_ECN 0x80 /* TCP has ECN set */
93 __u8 gso_type;
94 __virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */
95 __virtio16 gso_size; /* Bytes to append to hdr_len per frame */
96 __virtio16 csum_start; /* Position to start checksumming from */
97 __virtio16 csum_offset; /* Offset after that to place checksum */
98 __virtio16 num_buffers; /* Number of merged rx buffers */
99};
100
101#ifndef VIRTIO_NET_NO_LEGACY
74/* This header comes first in the scatter-gather list. 102/* This header comes first in the scatter-gather list.
75 * If VIRTIO_F_ANY_LAYOUT is not negotiated, it must 103 * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, it must
76 * be the first element of the scatter-gather list. If you don't 104 * be the first element of the scatter-gather list. If you don't
77 * specify GSO or CSUM features, you can simply ignore the header. */ 105 * specify GSO or CSUM features, you can simply ignore the header. */
78struct virtio_net_hdr { 106struct virtio_net_hdr {
79#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset 107 /* See VIRTIO_NET_HDR_F_* */
80#define VIRTIO_NET_HDR_F_DATA_VALID 2 // Csum is valid
81 __u8 flags; 108 __u8 flags;
82#define VIRTIO_NET_HDR_GSO_NONE 0 // Not a GSO frame 109 /* See VIRTIO_NET_HDR_GSO_* */
83#define VIRTIO_NET_HDR_GSO_TCPV4 1 // GSO frame, IPv4 TCP (TSO)
84#define VIRTIO_NET_HDR_GSO_UDP 3 // GSO frame, IPv4 UDP (UFO)
85#define VIRTIO_NET_HDR_GSO_TCPV6 4 // GSO frame, IPv6 TCP
86#define VIRTIO_NET_HDR_GSO_ECN 0x80 // TCP has ECN set
87 __u8 gso_type; 110 __u8 gso_type;
88 __virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */ 111 __virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */
89 __virtio16 gso_size; /* Bytes to append to hdr_len per frame */ 112 __virtio16 gso_size; /* Bytes to append to hdr_len per frame */
@@ -97,6 +120,7 @@ struct virtio_net_hdr_mrg_rxbuf {
97 struct virtio_net_hdr hdr; 120 struct virtio_net_hdr hdr;
98 __virtio16 num_buffers; /* Number of merged rx buffers */ 121 __virtio16 num_buffers; /* Number of merged rx buffers */
99}; 122};
123#endif /* ...VIRTIO_NET_NO_LEGACY */
100 124
101/* 125/*
102 * Control virtqueue data structures 126 * Control virtqueue data structures
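
The comment above claims struct virtio_net_hdr_v1 is bitwise-equivalent to the legacy merged-rxbuf header, only flattened; a compile-time spot check (illustrative, not part of the patch) would be:

#include <linux/bug.h>
#include <linux/stddef.h>
#include <linux/virtio_net.h>

static void example_check_hdr_layout(void)
{
	/* Same total size, and num_buffers sits at the same offset. */
	BUILD_BUG_ON(sizeof(struct virtio_net_hdr_v1) !=
		     sizeof(struct virtio_net_hdr_mrg_rxbuf));
	BUILD_BUG_ON(offsetof(struct virtio_net_hdr_v1, num_buffers) !=
		     offsetof(struct virtio_net_hdr_mrg_rxbuf, num_buffers));
}
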
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index 35b552c7f330..75301468359f 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -39,7 +39,7 @@
39#ifndef _LINUX_VIRTIO_PCI_H 39#ifndef _LINUX_VIRTIO_PCI_H
40#define _LINUX_VIRTIO_PCI_H 40#define _LINUX_VIRTIO_PCI_H
41 41
42#include <linux/virtio_config.h> 42#include <linux/types.h>
43 43
44#ifndef VIRTIO_PCI_NO_LEGACY 44#ifndef VIRTIO_PCI_NO_LEGACY
45 45
@@ -99,4 +99,95 @@
99/* Vector value used to disable MSI for queue */ 99/* Vector value used to disable MSI for queue */
100#define VIRTIO_MSI_NO_VECTOR 0xffff 100#define VIRTIO_MSI_NO_VECTOR 0xffff
101 101
102#ifndef VIRTIO_PCI_NO_MODERN
103
104/* IDs for different capabilities. Must all exist. */
105
106/* Common configuration */
107#define VIRTIO_PCI_CAP_COMMON_CFG 1
108/* Notifications */
109#define VIRTIO_PCI_CAP_NOTIFY_CFG 2
110/* ISR access */
111#define VIRTIO_PCI_CAP_ISR_CFG 3
112/* Device specific configuration */
113#define VIRTIO_PCI_CAP_DEVICE_CFG 4
114/* PCI configuration access */
115#define VIRTIO_PCI_CAP_PCI_CFG 5
116
117/* This is the PCI capability header: */
118struct virtio_pci_cap {
119 __u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
120 __u8 cap_next; /* Generic PCI field: next ptr. */
121 __u8 cap_len; /* Generic PCI field: capability length */
122 __u8 cfg_type; /* Identifies the structure. */
123 __u8 bar; /* Where to find it. */
124 __u8 padding[3]; /* Pad to full dword. */
125 __le32 offset; /* Offset within bar. */
126 __le32 length; /* Length of the structure, in bytes. */
127};
128
129struct virtio_pci_notify_cap {
130 struct virtio_pci_cap cap;
131 __le32 notify_off_multiplier; /* Multiplier for queue_notify_off. */
132};
133
134/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */
135struct virtio_pci_common_cfg {
136 /* About the whole device. */
137 __le32 device_feature_select; /* read-write */
138 __le32 device_feature; /* read-only */
139 __le32 guest_feature_select; /* read-write */
140 __le32 guest_feature; /* read-write */
141 __le16 msix_config; /* read-write */
142 __le16 num_queues; /* read-only */
143 __u8 device_status; /* read-write */
144 __u8 config_generation; /* read-only */
145
146 /* About a specific virtqueue. */
147 __le16 queue_select; /* read-write */
148 __le16 queue_size; /* read-write, power of 2. */
149 __le16 queue_msix_vector; /* read-write */
150 __le16 queue_enable; /* read-write */
151 __le16 queue_notify_off; /* read-only */
152 __le32 queue_desc_lo; /* read-write */
153 __le32 queue_desc_hi; /* read-write */
154 __le32 queue_avail_lo; /* read-write */
155 __le32 queue_avail_hi; /* read-write */
156 __le32 queue_used_lo; /* read-write */
157 __le32 queue_used_hi; /* read-write */
158};
159
160/* Macro versions of offsets for the Old Timers! */
161#define VIRTIO_PCI_CAP_VNDR 0
162#define VIRTIO_PCI_CAP_NEXT 1
163#define VIRTIO_PCI_CAP_LEN 2
164#define VIRTIO_PCI_CAP_CFG_TYPE 3
165#define VIRTIO_PCI_CAP_BAR 4
166#define VIRTIO_PCI_CAP_OFFSET 8
167#define VIRTIO_PCI_CAP_LENGTH 12
168
169#define VIRTIO_PCI_NOTIFY_CAP_MULT 16
170
171#define VIRTIO_PCI_COMMON_DFSELECT 0
172#define VIRTIO_PCI_COMMON_DF 4
173#define VIRTIO_PCI_COMMON_GFSELECT 8
174#define VIRTIO_PCI_COMMON_GF 12
175#define VIRTIO_PCI_COMMON_MSIX 16
176#define VIRTIO_PCI_COMMON_NUMQ 18
177#define VIRTIO_PCI_COMMON_STATUS 20
178#define VIRTIO_PCI_COMMON_CFGGENERATION 21
179#define VIRTIO_PCI_COMMON_Q_SELECT 22
180#define VIRTIO_PCI_COMMON_Q_SIZE 24
181#define VIRTIO_PCI_COMMON_Q_MSIX 26
182#define VIRTIO_PCI_COMMON_Q_ENABLE 28
183#define VIRTIO_PCI_COMMON_Q_NOFF 30
184#define VIRTIO_PCI_COMMON_Q_DESCLO 32
185#define VIRTIO_PCI_COMMON_Q_DESCHI 36
186#define VIRTIO_PCI_COMMON_Q_AVAILLO 40
187#define VIRTIO_PCI_COMMON_Q_AVAILHI 44
188#define VIRTIO_PCI_COMMON_Q_USEDLO 48
189#define VIRTIO_PCI_COMMON_Q_USEDHI 52
190
191#endif /* VIRTIO_PCI_NO_MODERN */
192
102#endif 193#endif
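
A hedged sketch of how a driver would locate the common configuration structure through the new capability list (the walk mirrors standard PCI capability scanning; 'pdev' is assumed to be a bound virtio PCI device):

#include <linux/pci.h>
#include <linux/virtio_pci.h>

/* Return the config-space position of the VIRTIO_PCI_CAP_COMMON_CFG
 * vendor capability, or 0 if the device does not expose one. */
static int example_find_common_cfg(struct pci_dev *pdev)
{
	int pos;

	for (pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
	     pos > 0;
	     pos = pci_find_next_capability(pdev, pos, PCI_CAP_ID_VNDR)) {
		u8 cfg_type;

		pci_read_config_byte(pdev, pos + VIRTIO_PCI_CAP_CFG_TYPE,
				     &cfg_type);
		if (cfg_type == VIRTIO_PCI_CAP_COMMON_CFG)
			return pos;
	}

	return 0;
}

The capability's bar/offset/length fields then say which BAR to map and where the struct virtio_pci_common_cfg registers live inside it.
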
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 867cc5084afb..b513e662d8e4 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -90,6 +90,7 @@ enum {
90}; 90};
91 91
92enum { 92enum {
93 IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
93 IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD, 94 IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
94 IB_USER_VERBS_EX_CMD_DESTROY_FLOW, 95 IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
95}; 96};
@@ -201,6 +202,28 @@ struct ib_uverbs_query_device_resp {
201 __u8 reserved[4]; 202 __u8 reserved[4];
202}; 203};
203 204
205struct ib_uverbs_ex_query_device {
206 __u32 comp_mask;
207 __u32 reserved;
208};
209
210struct ib_uverbs_odp_caps {
211 __u64 general_caps;
212 struct {
213 __u32 rc_odp_caps;
214 __u32 uc_odp_caps;
215 __u32 ud_odp_caps;
216 } per_transport_caps;
217 __u32 reserved;
218};
219
220struct ib_uverbs_ex_query_device_resp {
221 struct ib_uverbs_query_device_resp base;
222 __u32 comp_mask;
223 __u32 response_length;
224 struct ib_uverbs_odp_caps odp_caps;
225};
226
204struct ib_uverbs_query_port { 227struct ib_uverbs_query_port {
205 __u64 response; 228 __u64 response;
206 __u8 port_num; 229 __u8 port_num;
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 7491ee5d8164..83338210ee04 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -46,4 +46,30 @@ static inline efi_system_table_t __init *xen_efi_probe(void)
46} 46}
47#endif 47#endif
48 48
49#ifdef CONFIG_PREEMPT
50
51static inline void xen_preemptible_hcall_begin(void)
52{
53}
54
55static inline void xen_preemptible_hcall_end(void)
56{
57}
58
59#else
60
61DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);
62
63static inline void xen_preemptible_hcall_begin(void)
64{
65 __this_cpu_write(xen_in_preemptible_hcall, true);
66}
67
68static inline void xen_preemptible_hcall_end(void)
69{
70 __this_cpu_write(xen_in_preemptible_hcall, false);
71}
72
73#endif /* CONFIG_PREEMPT */
74
49#endif /* INCLUDE_XEN_OPS_H */ 75#endif /* INCLUDE_XEN_OPS_H */
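
For orientation (illustrative; the hypercall stand-in is invented), the intended calling pattern brackets a potentially long-running hypercall so that, on a non-CONFIG_PREEMPT kernel, the scheduler can see it is safe to preempt; under CONFIG_PREEMPT the helpers compile to nothing:

#include <xen/xen-ops.h>

static long do_long_hypercall(void)	/* stand-in for a real privcmd call */
{
	return 0;
}

static long example_privcmd_call(void)
{
	long ret;

	xen_preemptible_hcall_begin();
	ret = do_long_hypercall();
	xen_preemptible_hcall_end();

	return ret;
}
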
diff --git a/init/Kconfig b/init/Kconfig
index 058e3671fa11..f5dbc6d4261b 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -921,7 +921,7 @@ config NUMA_BALANCING_DEFAULT_ENABLED
921 machine. 921 machine.
922 922
923menuconfig CGROUPS 923menuconfig CGROUPS
924 boolean "Control Group support" 924 bool "Control Group support"
925 select KERNFS 925 select KERNFS
926 help 926 help
927 This option adds support for grouping sets of processes together, for 927 This option adds support for grouping sets of processes together, for
@@ -1290,8 +1290,8 @@ endif
1290config CC_OPTIMIZE_FOR_SIZE 1290config CC_OPTIMIZE_FOR_SIZE
1291 bool "Optimize for size" 1291 bool "Optimize for size"
1292 help 1292 help
1293 Enabling this option will pass "-Os" instead of "-O2" to gcc 1293 Enabling this option will pass "-Os" instead of "-O2" to
1294 resulting in a smaller kernel. 1294 your compiler resulting in a smaller kernel.
1295 1295
1296 If unsure, say N. 1296 If unsure, say N.
1297 1297
@@ -1762,7 +1762,7 @@ config SLABINFO
1762 default y 1762 default y
1763 1763
1764config RT_MUTEXES 1764config RT_MUTEXES
1765 boolean 1765 bool
1766 1766
1767config BASE_SMALL 1767config BASE_SMALL
1768 int 1768 int
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 07ce18ca71e0..0874e2edd275 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -604,7 +604,7 @@ return_normal:
604 online_cpus) 604 online_cpus)
605 cpu_relax(); 605 cpu_relax();
606 if (!time_left) 606 if (!time_left)
607 pr_crit("KGDB: Timed out waiting for secondary CPUs.\n"); 607 pr_crit("Timed out waiting for secondary CPUs.\n");
608 608
609 /* 609 /*
610 * At this point the primary processor is completely 610 * At this point the primary processor is completely
@@ -696,6 +696,14 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
696 696
697 if (arch_kgdb_ops.enable_nmi) 697 if (arch_kgdb_ops.enable_nmi)
698 arch_kgdb_ops.enable_nmi(0); 698 arch_kgdb_ops.enable_nmi(0);
699 /*
700 * Avoid entering the debugger if we were triggered due to an oops
701 * but panic_timeout indicates the system should automatically
702 * reboot on panic. We don't want to get stuck waiting for input
703 * on such systems, especially if it's "just" an oops.
704 */
705 if (signo != SIGTRAP && panic_timeout)
706 return 1;
699 707
700 memset(ks, 0, sizeof(struct kgdb_state)); 708 memset(ks, 0, sizeof(struct kgdb_state));
701 ks->cpu = raw_smp_processor_id(); 709 ks->cpu = raw_smp_processor_id();
@@ -828,6 +836,15 @@ static int kgdb_panic_event(struct notifier_block *self,
828 unsigned long val, 836 unsigned long val,
829 void *data) 837 void *data)
830{ 838{
839 /*
840 * Avoid entering the debugger if we were triggered due to a panic.
841 * We don't want to get stuck waiting for input from the user in that case.
842 * panic_timeout indicates the system should automatically
843 * reboot on panic.
844 */
845 if (panic_timeout)
846 return NOTIFY_DONE;
847
831 if (dbg_kdb_mode) 848 if (dbg_kdb_mode)
832 kdb_printf("PANIC: %s\n", (char *)data); 849 kdb_printf("PANIC: %s\n", (char *)data);
833 kgdb_breakpoint(); 850 kgdb_breakpoint();
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 7c70812caea5..fc1ef736253c 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -439,7 +439,7 @@ poll_again:
439 * substituted for %d, %x or %o in the prompt. 439 * substituted for %d, %x or %o in the prompt.
440 */ 440 */
441 441
442char *kdb_getstr(char *buffer, size_t bufsize, char *prompt) 442char *kdb_getstr(char *buffer, size_t bufsize, const char *prompt)
443{ 443{
444 if (prompt && kdb_prompt_str != prompt) 444 if (prompt && kdb_prompt_str != prompt)
445 strncpy(kdb_prompt_str, prompt, CMD_BUFLEN); 445 strncpy(kdb_prompt_str, prompt, CMD_BUFLEN);
@@ -548,7 +548,7 @@ static int kdb_search_string(char *searched, char *searchfor)
548 return 0; 548 return 0;
549} 549}
550 550
551int vkdb_printf(const char *fmt, va_list ap) 551int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
552{ 552{
553 int diag; 553 int diag;
554 int linecount; 554 int linecount;
@@ -680,6 +680,12 @@ int vkdb_printf(const char *fmt, va_list ap)
680 size_avail = sizeof(kdb_buffer) - len; 680 size_avail = sizeof(kdb_buffer) - len;
681 goto kdb_print_out; 681 goto kdb_print_out;
682 } 682 }
683 if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH)
684 /*
 685 * This was an interactive search (using '/' at the more
686 * prompt) and it has completed. Clear the flag.
687 */
688 kdb_grepping_flag = 0;
683 /* 689 /*
684 * at this point the string is a full line and 690 * at this point the string is a full line and
685 * should be printed, up to the null. 691 * should be printed, up to the null.
@@ -691,19 +697,20 @@ kdb_printit:
691 * Write to all consoles. 697 * Write to all consoles.
692 */ 698 */
693 retlen = strlen(kdb_buffer); 699 retlen = strlen(kdb_buffer);
700 cp = (char *) printk_skip_level(kdb_buffer);
694 if (!dbg_kdb_mode && kgdb_connected) { 701 if (!dbg_kdb_mode && kgdb_connected) {
695 gdbstub_msg_write(kdb_buffer, retlen); 702 gdbstub_msg_write(cp, retlen - (cp - kdb_buffer));
696 } else { 703 } else {
697 if (dbg_io_ops && !dbg_io_ops->is_console) { 704 if (dbg_io_ops && !dbg_io_ops->is_console) {
698 len = retlen; 705 len = retlen - (cp - kdb_buffer);
699 cp = kdb_buffer; 706 cp2 = cp;
700 while (len--) { 707 while (len--) {
701 dbg_io_ops->write_char(*cp); 708 dbg_io_ops->write_char(*cp2);
702 cp++; 709 cp2++;
703 } 710 }
704 } 711 }
705 while (c) { 712 while (c) {
706 c->write(c, kdb_buffer, retlen); 713 c->write(c, cp, retlen - (cp - kdb_buffer));
707 touch_nmi_watchdog(); 714 touch_nmi_watchdog();
708 c = c->next; 715 c = c->next;
709 } 716 }
@@ -711,7 +718,10 @@ kdb_printit:
711 if (logging) { 718 if (logging) {
712 saved_loglevel = console_loglevel; 719 saved_loglevel = console_loglevel;
713 console_loglevel = CONSOLE_LOGLEVEL_SILENT; 720 console_loglevel = CONSOLE_LOGLEVEL_SILENT;
714 printk(KERN_INFO "%s", kdb_buffer); 721 if (printk_get_level(kdb_buffer) || src == KDB_MSGSRC_PRINTK)
722 printk("%s", kdb_buffer);
723 else
724 pr_info("%s", kdb_buffer);
715 } 725 }
716 726
717 if (KDB_STATE(PAGER)) { 727 if (KDB_STATE(PAGER)) {
@@ -794,11 +804,23 @@ kdb_printit:
794 kdb_nextline = linecount - 1; 804 kdb_nextline = linecount - 1;
795 kdb_printf("\r"); 805 kdb_printf("\r");
796 suspend_grep = 1; /* for this recursion */ 806 suspend_grep = 1; /* for this recursion */
807 } else if (buf1[0] == '/' && !kdb_grepping_flag) {
808 kdb_printf("\r");
809 kdb_getstr(kdb_grep_string, KDB_GREP_STRLEN,
810 kdbgetenv("SEARCHPROMPT") ?: "search> ");
811 *strchrnul(kdb_grep_string, '\n') = '\0';
812 kdb_grepping_flag += KDB_GREPPING_FLAG_SEARCH;
813 suspend_grep = 1; /* for this recursion */
797 } else if (buf1[0] && buf1[0] != '\n') { 814 } else if (buf1[0] && buf1[0] != '\n') {
798 /* user hit something other than enter */ 815 /* user hit something other than enter */
799 suspend_grep = 1; /* for this recursion */ 816 suspend_grep = 1; /* for this recursion */
800 kdb_printf("\nOnly 'q' or 'Q' are processed at more " 817 if (buf1[0] != '/')
801 "prompt, input ignored\n"); 818 kdb_printf(
819 "\nOnly 'q', 'Q' or '/' are processed at "
820 "more prompt, input ignored\n");
821 else
822 kdb_printf("\n'/' cannot be used during | "
823 "grep filtering, input ignored\n");
802 } else if (kdb_grepping_flag) { 824 } else if (kdb_grepping_flag) {
803 /* user hit enter */ 825 /* user hit enter */
804 suspend_grep = 1; /* for this recursion */ 826 suspend_grep = 1; /* for this recursion */
@@ -844,7 +866,7 @@ int kdb_printf(const char *fmt, ...)
844 int r; 866 int r;
845 867
846 va_start(ap, fmt); 868 va_start(ap, fmt);
847 r = vkdb_printf(fmt, ap); 869 r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap);
848 va_end(ap); 870 va_end(ap);
849 871
850 return r; 872 return r;
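
The kdb_io.c changes thread a message-source enum through vkdb_printf() and use printk_skip_level() so the KERN_<LEVEL> prefix (an ASCII SOH byte plus one level character) is not echoed to kdb consoles. A standalone reimplementation of the prefix handling, modelled on the printk.h helpers and simplified for illustration:

#include <stdio.h>

#define KERN_SOH_ASCII '\001'

static char printk_get_level(const char *buffer)
{
        if (buffer[0] == KERN_SOH_ASCII && buffer[1]) {
                switch (buffer[1]) {
                case '0' ... '7':       /* KERN_EMERG .. KERN_DEBUG */
                case 'd':               /* KERN_DEFAULT */
                        return buffer[1];
                }
        }
        return 0;
}

static const char *printk_skip_level(const char *buffer)
{
        if (printk_get_level(buffer))
                return buffer + 2;      /* skip SOH plus the level char */
        return buffer;
}

int main(void)
{
        const char *msg = "\0016hello\n";       /* KERN_INFO-tagged message */

        printf("level=%c body=%s", printk_get_level(msg), printk_skip_level(msg));
        return 0;
}
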
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 7b40c5f07dce..4121345498e0 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -50,8 +50,7 @@
50static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE; 50static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE;
51module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600); 51module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600);
52 52
53#define GREP_LEN 256 53char kdb_grep_string[KDB_GREP_STRLEN];
54char kdb_grep_string[GREP_LEN];
55int kdb_grepping_flag; 54int kdb_grepping_flag;
56EXPORT_SYMBOL(kdb_grepping_flag); 55EXPORT_SYMBOL(kdb_grepping_flag);
57int kdb_grep_leading; 56int kdb_grep_leading;
@@ -870,7 +869,7 @@ static void parse_grep(const char *str)
870 len = strlen(cp); 869 len = strlen(cp);
871 if (!len) 870 if (!len)
872 return; 871 return;
873 if (len >= GREP_LEN) { 872 if (len >= KDB_GREP_STRLEN) {
874 kdb_printf("search string too long\n"); 873 kdb_printf("search string too long\n");
875 return; 874 return;
876 } 875 }
@@ -915,13 +914,12 @@ int kdb_parse(const char *cmdstr)
915 char *cp; 914 char *cp;
916 char *cpp, quoted; 915 char *cpp, quoted;
917 kdbtab_t *tp; 916 kdbtab_t *tp;
918 int i, escaped, ignore_errors = 0, check_grep; 917 int i, escaped, ignore_errors = 0, check_grep = 0;
919 918
920 /* 919 /*
921 * First tokenize the command string. 920 * First tokenize the command string.
922 */ 921 */
923 cp = (char *)cmdstr; 922 cp = (char *)cmdstr;
924 kdb_grepping_flag = check_grep = 0;
925 923
926 if (KDB_FLAG(CMD_INTERRUPT)) { 924 if (KDB_FLAG(CMD_INTERRUPT)) {
927 /* Previous command was interrupted, newline must not 925 /* Previous command was interrupted, newline must not
@@ -1247,7 +1245,6 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
1247 kdb_printf("due to NonMaskable Interrupt @ " 1245 kdb_printf("due to NonMaskable Interrupt @ "
1248 kdb_machreg_fmt "\n", 1246 kdb_machreg_fmt "\n",
1249 instruction_pointer(regs)); 1247 instruction_pointer(regs));
1250 kdb_dumpregs(regs);
1251 break; 1248 break;
1252 case KDB_REASON_SSTEP: 1249 case KDB_REASON_SSTEP:
1253 case KDB_REASON_BREAK: 1250 case KDB_REASON_BREAK:
@@ -1281,6 +1278,9 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
1281 */ 1278 */
1282 kdb_nextline = 1; 1279 kdb_nextline = 1;
1283 KDB_STATE_CLEAR(SUPPRESS); 1280 KDB_STATE_CLEAR(SUPPRESS);
1281 kdb_grepping_flag = 0;
1282 /* ensure the old search does not leak into '/' commands */
1283 kdb_grep_string[0] = '\0';
1284 1284
1285 cmdbuf = cmd_cur; 1285 cmdbuf = cmd_cur;
1286 *cmdbuf = '\0'; 1286 *cmdbuf = '\0';
@@ -2256,7 +2256,7 @@ static int kdb_cpu(int argc, const char **argv)
2256 /* 2256 /*
2257 * Validate cpunum 2257 * Validate cpunum
2258 */ 2258 */
2259 if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb) 2259 if ((cpunum >= CONFIG_NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
2260 return KDB_BADCPUNUM; 2260 return KDB_BADCPUNUM;
2261 2261
2262 dbg_switch_cpu = cpunum; 2262 dbg_switch_cpu = cpunum;
@@ -2583,7 +2583,7 @@ static int kdb_summary(int argc, const char **argv)
2583#define K(x) ((x) << (PAGE_SHIFT - 10)) 2583#define K(x) ((x) << (PAGE_SHIFT - 10))
2584 kdb_printf("\nMemTotal: %8lu kB\nMemFree: %8lu kB\n" 2584 kdb_printf("\nMemTotal: %8lu kB\nMemFree: %8lu kB\n"
2585 "Buffers: %8lu kB\n", 2585 "Buffers: %8lu kB\n",
2586 val.totalram, val.freeram, val.bufferram); 2586 K(val.totalram), K(val.freeram), K(val.bufferram));
2587 return 0; 2587 return 0;
2588} 2588}
2589 2589
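
The kdb_summary() fix wraps the sysinfo counters in K(), which converts page counts to kilobytes; the earlier code printed raw page counts under a "kB" label. A small standalone check of the arithmetic, assuming the common PAGE_SHIFT of 12 (4 KiB pages):

#include <stdio.h>

#define PAGE_SHIFT 12
#define K(x) ((x) << (PAGE_SHIFT - 10))

int main(void)
{
        unsigned long totalram = 262144;        /* hypothetical: 1 GiB in pages */

        printf("MemTotal: %8lu kB\n", K(totalram));     /* 1048576 kB */
        printf("raw pages (the old, wrong output): %lu\n", totalram);
        return 0;
}
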
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index eaacd1693954..75014d7f4568 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -196,7 +196,9 @@ extern int kdb_main_loop(kdb_reason_t, kdb_reason_t,
196 196
197/* Miscellaneous functions and data areas */ 197/* Miscellaneous functions and data areas */
198extern int kdb_grepping_flag; 198extern int kdb_grepping_flag;
199#define KDB_GREPPING_FLAG_SEARCH 0x8000
199extern char kdb_grep_string[]; 200extern char kdb_grep_string[];
201#define KDB_GREP_STRLEN 256
200extern int kdb_grep_leading; 202extern int kdb_grep_leading;
201extern int kdb_grep_trailing; 203extern int kdb_grep_trailing;
202extern char *kdb_cmds[]; 204extern char *kdb_cmds[];
@@ -209,7 +211,7 @@ extern void kdb_ps1(const struct task_struct *p);
209extern void kdb_print_nameval(const char *name, unsigned long val); 211extern void kdb_print_nameval(const char *name, unsigned long val);
210extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info); 212extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info);
211extern void kdb_meminfo_proc_show(void); 213extern void kdb_meminfo_proc_show(void);
212extern char *kdb_getstr(char *, size_t, char *); 214extern char *kdb_getstr(char *, size_t, const char *);
213extern void kdb_gdb_state_pass(char *buf); 215extern void kdb_gdb_state_pass(char *buf);
214 216
215/* Defines for kdb_symbol_print */ 217/* Defines for kdb_symbol_print */
diff --git a/kernel/gcov/Makefile b/kernel/gcov/Makefile
index 52aa7e8de927..752d6486b67e 100644
--- a/kernel/gcov/Makefile
+++ b/kernel/gcov/Makefile
@@ -1,33 +1,7 @@
1ccflags-y := -DSRCTREE='"$(srctree)"' -DOBJTREE='"$(objtree)"' 1ccflags-y := -DSRCTREE='"$(srctree)"' -DOBJTREE='"$(objtree)"'
2 2
3# if-lt 3obj-y := base.o fs.o
4# Usage VAR := $(call if-lt, $(a), $(b)) 4obj-$(CONFIG_GCOV_FORMAT_3_4) += gcc_3_4.o
5# Returns 1 if (a < b) 5obj-$(CONFIG_GCOV_FORMAT_4_7) += gcc_4_7.o
6if-lt = $(shell [ $(1) -lt $(2) ] && echo 1) 6obj-$(CONFIG_GCOV_FORMAT_AUTODETECT) += $(call cc-ifversion, -lt, 0407, \
7 7 gcc_3_4.o, gcc_4_7.o)
8ifeq ($(CONFIG_GCOV_FORMAT_3_4),y)
9 cc-ver := 0304
10else ifeq ($(CONFIG_GCOV_FORMAT_4_7),y)
11 cc-ver := 0407
12else
13# Use cc-version if available, otherwise set 0
14#
15# scripts/Kbuild.include, which contains cc-version function, is not included
16# during make clean "make -f scripts/Makefile.clean obj=kernel/gcov"
17# Meaning cc-ver is empty causing if-lt test to fail with
18# "/bin/sh: line 0: [: -lt: unary operator expected" error mesage.
19# This has no affect on the clean phase, but the error message could be
20# confusing/annoying. So this dummy workaround sets cc-ver to zero if cc-version
21# is not available. We can probably move if-lt to Kbuild.include, so it's also
22# not defined during clean or to include Kbuild.include in
23# scripts/Makefile.clean. But the following workaround seems least invasive.
24 cc-ver := $(if $(call cc-version),$(call cc-version),0)
25endif
26
27obj-$(CONFIG_GCOV_KERNEL) := base.o fs.o
28
29ifeq ($(call if-lt, $(cc-ver), 0407),1)
30 obj-$(CONFIG_GCOV_KERNEL) += gcc_3_4.o
31else
32 obj-$(CONFIG_GCOV_KERNEL) += gcc_4_7.o
33endif
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index ff7f47d026ac..782172f073c5 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -314,12 +314,12 @@ static void notrace klp_ftrace_handler(unsigned long ip,
314 rcu_read_lock(); 314 rcu_read_lock();
315 func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, 315 func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
316 stack_node); 316 stack_node);
317 rcu_read_unlock();
318
319 if (WARN_ON_ONCE(!func)) 317 if (WARN_ON_ONCE(!func))
320 return; 318 goto unlock;
321 319
322 klp_arch_set_pc(regs, (unsigned long)func->new_func); 320 klp_arch_set_pc(regs, (unsigned long)func->new_func);
321unlock:
322 rcu_read_unlock();
323} 323}
324 324
325static int klp_disable_func(struct klp_func *func) 325static int klp_disable_func(struct klp_func *func)
@@ -731,7 +731,7 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
731 func->state = KLP_DISABLED; 731 func->state = KLP_DISABLED;
732 732
733 return kobject_init_and_add(&func->kobj, &klp_ktype_func, 733 return kobject_init_and_add(&func->kobj, &klp_ktype_func,
734 obj->kobj, func->old_name); 734 obj->kobj, "%s", func->old_name);
735} 735}
736 736
737/* parts of the initialization that is done only when the object is loaded */ 737/* parts of the initialization that is done only when the object is loaded */
@@ -807,7 +807,7 @@ static int klp_init_patch(struct klp_patch *patch)
807 patch->state = KLP_DISABLED; 807 patch->state = KLP_DISABLED;
808 808
809 ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch, 809 ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
810 klp_root_kobj, patch->mod->name); 810 klp_root_kobj, "%s", patch->mod->name);
811 if (ret) 811 if (ret)
812 goto unlock; 812 goto unlock;
813 813
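
The two livepatch hunks are the classic format-string fix: a symbol or module name that contains '%' must be passed as data, never as the format itself. A userspace model with printf() standing in for kobject_init_and_add(); the name shown is hypothetical.

#include <stdio.h>

int main(void)
{
        const char *name = "do_fork_%x";        /* a name containing '%' */

        /* printf(name) would parse "%x" and read a nonexistent argument:
         * undefined behavior, and exploitable if the name is attacker
         * controlled. Passing it as an argument is always safe: */
        printf("%s\n", name);
        return 0;
}
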
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 3059bc2f022d..6357265a31ad 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1193,7 +1193,9 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
1193 ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); 1193 ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
1194 1194
1195 if (unlikely(ret)) { 1195 if (unlikely(ret)) {
1196 remove_waiter(lock, &waiter); 1196 __set_current_state(TASK_RUNNING);
1197 if (rt_mutex_has_waiters(lock))
1198 remove_waiter(lock, &waiter);
1197 rt_mutex_handle_deadlock(ret, chwalk, &waiter); 1199 rt_mutex_handle_deadlock(ret, chwalk, &waiter);
1198 } 1200 }
1199 1201
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index c06df7de0963..01cfd69c54c6 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1811,7 +1811,7 @@ int vprintk_default(const char *fmt, va_list args)
1811 1811
1812#ifdef CONFIG_KGDB_KDB 1812#ifdef CONFIG_KGDB_KDB
1813 if (unlikely(kdb_trap_printk)) { 1813 if (unlikely(kdb_trap_printk)) {
1814 r = vkdb_printf(fmt, args); 1814 r = vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
1815 return r; 1815 return r;
1816 } 1816 }
1817#endif 1817#endif
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 0d7bbe3095ad..0a571e9a0f1d 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -326,6 +326,7 @@ void rcu_read_unlock_special(struct task_struct *t)
326 special = t->rcu_read_unlock_special; 326 special = t->rcu_read_unlock_special;
327 if (special.b.need_qs) { 327 if (special.b.need_qs) {
328 rcu_preempt_qs(); 328 rcu_preempt_qs();
329 t->rcu_read_unlock_special.b.need_qs = false;
329 if (!t->rcu_read_unlock_special.s) { 330 if (!t->rcu_read_unlock_special.s) {
330 local_irq_restore(flags); 331 local_irq_restore(flags);
331 return; 332 return;
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index 8a2e230fb86a..eae160dd669d 100644
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -87,8 +87,7 @@ static inline struct autogroup *autogroup_create(void)
87 * so we don't have to move tasks around upon policy change, 87 * so we don't have to move tasks around upon policy change,
88 * or flail around trying to allocate bandwidth on the fly. 88 * or flail around trying to allocate bandwidth on the fly.
89 * A bandwidth exception in __sched_setscheduler() allows 89 * A bandwidth exception in __sched_setscheduler() allows
90 * the policy change to proceed. Thereafter, task_group() 90 * the policy change to proceed.
91 * returns &root_task_group, so zero bandwidth is required.
92 */ 91 */
93 free_rt_sched_group(tg); 92 free_rt_sched_group(tg);
94 tg->rt_se = root_task_group.rt_se; 93 tg->rt_se = root_task_group.rt_se;
@@ -115,9 +114,6 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
115 if (tg != &root_task_group) 114 if (tg != &root_task_group)
116 return false; 115 return false;
117 116
118 if (p->sched_class != &fair_sched_class)
119 return false;
120
121 /* 117 /*
122 * We can only assume the task group can't go away on us if 118 * We can only assume the task group can't go away on us if
123 * autogroup_move_group() can see us on ->thread_group list. 119 * autogroup_move_group() can see us on ->thread_group list.
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
index 7052d3fd4e7b..8d0f35debf35 100644
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -274,7 +274,7 @@ bool try_wait_for_completion(struct completion *x)
274 * first without taking the lock so we can 274 * first without taking the lock so we can
275 * return early in the blocking case. 275 * return early in the blocking case.
276 */ 276 */
277 if (!ACCESS_ONCE(x->done)) 277 if (!READ_ONCE(x->done))
278 return 0; 278 return 0;
279 279
280 spin_lock_irqsave(&x->wait.lock, flags); 280 spin_lock_irqsave(&x->wait.lock, flags);
@@ -297,6 +297,21 @@ EXPORT_SYMBOL(try_wait_for_completion);
297 */ 297 */
298bool completion_done(struct completion *x) 298bool completion_done(struct completion *x)
299{ 299{
300 return !!ACCESS_ONCE(x->done); 300 if (!READ_ONCE(x->done))
301 return false;
302
303 /*
304 * If ->done, we need to wait for complete() to release ->wait.lock
305 * otherwise we can end up freeing the completion before complete()
306 * is done referencing it.
307 *
308 * The RMB pairs with complete()'s RELEASE of ->wait.lock and orders
309 * the loads of ->done and ->wait.lock such that we cannot observe
310 * the lock before complete() acquires it while observing the ->done
311 * after it's acquired the lock.
312 */
313 smp_rmb();
314 spin_unlock_wait(&x->wait.lock);
315 return true;
301} 316}
302EXPORT_SYMBOL(completion_done); 317EXPORT_SYMBOL(completion_done);
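
completion_done() now re-checks the lock word after observing ->done, so a waiter cannot free the completion while complete() still holds ->wait.lock. A userspace analogue of the ordering argument, using C11 atomics as stand-ins for READ_ONCE()/smp_rmb()/spin_unlock_wait(); this is a model of the reasoning, not the kernel code.

#include <stdatomic.h>
#include <stdbool.h>

struct completion_model {
        atomic_uint done;
        atomic_bool wait_lock;          /* models ->wait.lock being held */
};

static bool completion_done_model(struct completion_model *x)
{
        if (!atomic_load_explicit(&x->done, memory_order_relaxed))
                return false;

        /* Pairs with the completer's releasing unlock: order the ->done
         * load against the lock-word observation below. */
        atomic_thread_fence(memory_order_acquire);

        /* spin_unlock_wait(): only report true once the completer has
         * dropped the lock, so the caller may safely free the object. */
        while (atomic_load_explicit(&x->wait_lock, memory_order_acquire))
                ;                       /* spin */
        return true;
}

int main(void)
{
        struct completion_model c = { 1, false };

        return completion_done_model(&c) ? 0 : 1;
}
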
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 13049aac05a6..f0f831e8a345 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -307,66 +307,6 @@ __read_mostly int scheduler_running;
307int sysctl_sched_rt_runtime = 950000; 307int sysctl_sched_rt_runtime = 950000;
308 308
309/* 309/*
310 * __task_rq_lock - lock the rq @p resides on.
311 */
312static inline struct rq *__task_rq_lock(struct task_struct *p)
313 __acquires(rq->lock)
314{
315 struct rq *rq;
316
317 lockdep_assert_held(&p->pi_lock);
318
319 for (;;) {
320 rq = task_rq(p);
321 raw_spin_lock(&rq->lock);
322 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
323 return rq;
324 raw_spin_unlock(&rq->lock);
325
326 while (unlikely(task_on_rq_migrating(p)))
327 cpu_relax();
328 }
329}
330
331/*
332 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
333 */
334static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
335 __acquires(p->pi_lock)
336 __acquires(rq->lock)
337{
338 struct rq *rq;
339
340 for (;;) {
341 raw_spin_lock_irqsave(&p->pi_lock, *flags);
342 rq = task_rq(p);
343 raw_spin_lock(&rq->lock);
344 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
345 return rq;
346 raw_spin_unlock(&rq->lock);
347 raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
348
349 while (unlikely(task_on_rq_migrating(p)))
350 cpu_relax();
351 }
352}
353
354static void __task_rq_unlock(struct rq *rq)
355 __releases(rq->lock)
356{
357 raw_spin_unlock(&rq->lock);
358}
359
360static inline void
361task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
362 __releases(rq->lock)
363 __releases(p->pi_lock)
364{
365 raw_spin_unlock(&rq->lock);
366 raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
367}
368
369/*
370 * this_rq_lock - lock this runqueue and disable interrupts. 310 * this_rq_lock - lock this runqueue and disable interrupts.
371 */ 311 */
372static struct rq *this_rq_lock(void) 312static struct rq *this_rq_lock(void)
@@ -2899,7 +2839,7 @@ void __sched schedule_preempt_disabled(void)
2899 preempt_disable(); 2839 preempt_disable();
2900} 2840}
2901 2841
2902static void preempt_schedule_common(void) 2842static void __sched notrace preempt_schedule_common(void)
2903{ 2843{
2904 do { 2844 do {
2905 __preempt_count_add(PREEMPT_ACTIVE); 2845 __preempt_count_add(PREEMPT_ACTIVE);
@@ -4418,36 +4358,29 @@ EXPORT_SYMBOL_GPL(yield_to);
4418 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 4358 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
4419 * that process accounting knows that this is a task in IO wait state. 4359 * that process accounting knows that this is a task in IO wait state.
4420 */ 4360 */
4421void __sched io_schedule(void)
4422{
4423 struct rq *rq = raw_rq();
4424
4425 delayacct_blkio_start();
4426 atomic_inc(&rq->nr_iowait);
4427 blk_flush_plug(current);
4428 current->in_iowait = 1;
4429 schedule();
4430 current->in_iowait = 0;
4431 atomic_dec(&rq->nr_iowait);
4432 delayacct_blkio_end();
4433}
4434EXPORT_SYMBOL(io_schedule);
4435
4436long __sched io_schedule_timeout(long timeout) 4361long __sched io_schedule_timeout(long timeout)
4437{ 4362{
4438 struct rq *rq = raw_rq(); 4363 int old_iowait = current->in_iowait;
4364 struct rq *rq;
4439 long ret; 4365 long ret;
4440 4366
4367 current->in_iowait = 1;
4368 if (old_iowait)
4369 blk_schedule_flush_plug(current);
4370 else
4371 blk_flush_plug(current);
4372
4441 delayacct_blkio_start(); 4373 delayacct_blkio_start();
4374 rq = raw_rq();
4442 atomic_inc(&rq->nr_iowait); 4375 atomic_inc(&rq->nr_iowait);
4443 blk_flush_plug(current);
4444 current->in_iowait = 1;
4445 ret = schedule_timeout(timeout); 4376 ret = schedule_timeout(timeout);
4446 current->in_iowait = 0; 4377 current->in_iowait = old_iowait;
4447 atomic_dec(&rq->nr_iowait); 4378 atomic_dec(&rq->nr_iowait);
4448 delayacct_blkio_end(); 4379 delayacct_blkio_end();
4380
4449 return ret; 4381 return ret;
4450} 4382}
4383EXPORT_SYMBOL(io_schedule_timeout);
4451 4384
4452/** 4385/**
4453 * sys_sched_get_priority_max - return maximum RT priority. 4386 * sys_sched_get_priority_max - return maximum RT priority.
@@ -7642,6 +7575,12 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
7642{ 7575{
7643 struct task_struct *g, *p; 7576 struct task_struct *g, *p;
7644 7577
7578 /*
7579 * Autogroups do not have RT tasks; see autogroup_create().
7580 */
7581 if (task_group_is_autogroup(tg))
7582 return 0;
7583
7645 for_each_process_thread(g, p) { 7584 for_each_process_thread(g, p) {
7646 if (rt_task(p) && task_group(p) == tg) 7585 if (rt_task(p) && task_group(p) == tg)
7647 return 1; 7586 return 1;
@@ -7734,6 +7673,17 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
7734{ 7673{
7735 int i, err = 0; 7674 int i, err = 0;
7736 7675
7676 /*
 7677 * Disallowing the root group RT runtime is BAD; it would disallow the
 7678 * kernel creating (and/or operating) RT threads.
7679 */
7680 if (tg == &root_task_group && rt_runtime == 0)
7681 return -EINVAL;
7682
7683 /* No period doesn't make any sense. */
7684 if (rt_period == 0)
7685 return -EINVAL;
7686
7737 mutex_lock(&rt_constraints_mutex); 7687 mutex_lock(&rt_constraints_mutex);
7738 read_lock(&tasklist_lock); 7688 read_lock(&tasklist_lock);
7739 err = __rt_schedulable(tg, rt_period, rt_runtime); 7689 err = __rt_schedulable(tg, rt_period, rt_runtime);
@@ -7790,9 +7740,6 @@ static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
7790 rt_period = (u64)rt_period_us * NSEC_PER_USEC; 7740 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
7791 rt_runtime = tg->rt_bandwidth.rt_runtime; 7741 rt_runtime = tg->rt_bandwidth.rt_runtime;
7792 7742
7793 if (rt_period == 0)
7794 return -EINVAL;
7795
7796 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 7743 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
7797} 7744}
7798 7745
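
io_schedule() is removed from sched/core.c and the timeout variant becomes the single implementation, saving and restoring current->in_iowait so the plug-flush path can safely re-enter it; the matching header change presumably redefines io_schedule() as a thin wrapper around io_schedule_timeout(MAX_SCHEDULE_TIMEOUT). A userspace model of why the save/restore matters for nesting:

#include <stdio.h>

static int in_iowait;

static long io_schedule_timeout_model(long timeout)
{
        int old_iowait = in_iowait;

        in_iowait = 1;
        /* ... block for up to 'timeout'; in the kernel this path may
         * re-enter itself via the block-plug flush ... */
        in_iowait = old_iowait;         /* was "= 0", which broke nesting */
        return timeout;
}

int main(void)
{
        in_iowait = 1;                  /* pretend we are a nested caller */
        io_schedule_timeout_model(10);
        printf("in_iowait after inner call: %d\n", in_iowait);  /* still 1 */
        return 0;
}
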
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index a027799ae130..3fa8fa6d9403 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -511,16 +511,10 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
511 struct sched_dl_entity, 511 struct sched_dl_entity,
512 dl_timer); 512 dl_timer);
513 struct task_struct *p = dl_task_of(dl_se); 513 struct task_struct *p = dl_task_of(dl_se);
514 unsigned long flags;
514 struct rq *rq; 515 struct rq *rq;
515again:
516 rq = task_rq(p);
517 raw_spin_lock(&rq->lock);
518 516
519 if (rq != task_rq(p)) { 517 rq = task_rq_lock(current, &flags);
520 /* Task was moved, retrying. */
521 raw_spin_unlock(&rq->lock);
522 goto again;
523 }
524 518
525 /* 519 /*
526 * We need to take care of several possible races here: 520 * We need to take care of several possible races here:
@@ -541,6 +535,26 @@ again:
541 535
542 sched_clock_tick(); 536 sched_clock_tick();
543 update_rq_clock(rq); 537 update_rq_clock(rq);
538
539 /*
540 * If the throttle happened during sched-out; like:
541 *
542 * schedule()
543 * deactivate_task()
544 * dequeue_task_dl()
545 * update_curr_dl()
546 * start_dl_timer()
547 * __dequeue_task_dl()
548 * prev->on_rq = 0;
549 *
550 * We can be both throttled and !queued. Replenish the counter
551 * but do not enqueue -- wait for our wakeup to do that.
552 */
553 if (!task_on_rq_queued(p)) {
554 replenish_dl_entity(dl_se, dl_se);
555 goto unlock;
556 }
557
544 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); 558 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
545 if (dl_task(rq->curr)) 559 if (dl_task(rq->curr))
546 check_preempt_curr_dl(rq, p, 0); 560 check_preempt_curr_dl(rq, p, 0);
@@ -555,7 +569,7 @@ again:
555 push_dl_task(rq); 569 push_dl_task(rq);
556#endif 570#endif
557unlock: 571unlock:
558 raw_spin_unlock(&rq->lock); 572 task_rq_unlock(rq, current, &flags);
559 573
560 return HRTIMER_NORESTART; 574 return HRTIMER_NORESTART;
561} 575}
@@ -898,6 +912,7 @@ static void yield_task_dl(struct rq *rq)
898 rq->curr->dl.dl_yielded = 1; 912 rq->curr->dl.dl_yielded = 1;
899 p->dl.runtime = 0; 913 p->dl.runtime = 0;
900 } 914 }
915 update_rq_clock(rq);
901 update_curr_dl(rq); 916 update_curr_dl(rq);
902} 917}
903 918
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0870db23d79c..dc0f435a2779 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1380,6 +1380,82 @@ static inline void sched_avg_update(struct rq *rq) { }
1380 1380
1381extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period); 1381extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
1382 1382
1383/*
1384 * __task_rq_lock - lock the rq @p resides on.
1385 */
1386static inline struct rq *__task_rq_lock(struct task_struct *p)
1387 __acquires(rq->lock)
1388{
1389 struct rq *rq;
1390
1391 lockdep_assert_held(&p->pi_lock);
1392
1393 for (;;) {
1394 rq = task_rq(p);
1395 raw_spin_lock(&rq->lock);
1396 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
1397 return rq;
1398 raw_spin_unlock(&rq->lock);
1399
1400 while (unlikely(task_on_rq_migrating(p)))
1401 cpu_relax();
1402 }
1403}
1404
1405/*
1406 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
1407 */
1408static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
1409 __acquires(p->pi_lock)
1410 __acquires(rq->lock)
1411{
1412 struct rq *rq;
1413
1414 for (;;) {
1415 raw_spin_lock_irqsave(&p->pi_lock, *flags);
1416 rq = task_rq(p);
1417 raw_spin_lock(&rq->lock);
1418 /*
1419 * move_queued_task() task_rq_lock()
1420 *
1421 * ACQUIRE (rq->lock)
1422 * [S] ->on_rq = MIGRATING [L] rq = task_rq()
1423 * WMB (__set_task_cpu()) ACQUIRE (rq->lock);
1424 * [S] ->cpu = new_cpu [L] task_rq()
1425 * [L] ->on_rq
1426 * RELEASE (rq->lock)
1427 *
1428 * If we observe the old cpu in task_rq_lock, the acquire of
1429 * the old rq->lock will fully serialize against the stores.
1430 *
1431 * If we observe the new cpu in task_rq_lock, the acquire will
1432 * pair with the WMB to ensure we must then also see migrating.
1433 */
1434 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
1435 return rq;
1436 raw_spin_unlock(&rq->lock);
1437 raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
1438
1439 while (unlikely(task_on_rq_migrating(p)))
1440 cpu_relax();
1441 }
1442}
1443
1444static inline void __task_rq_unlock(struct rq *rq)
1445 __releases(rq->lock)
1446{
1447 raw_spin_unlock(&rq->lock);
1448}
1449
1450static inline void
1451task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
1452 __releases(rq->lock)
1453 __releases(p->pi_lock)
1454{
1455 raw_spin_unlock(&rq->lock);
1456 raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
1457}
1458
1383#ifdef CONFIG_SMP 1459#ifdef CONFIG_SMP
1384#ifdef CONFIG_PREEMPT 1460#ifdef CONFIG_PREEMPT
1385 1461
diff --git a/kernel/sys.c b/kernel/sys.c
index ea9c88109894..a03d9cd23ed7 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -97,6 +97,12 @@
97#ifndef MPX_DISABLE_MANAGEMENT 97#ifndef MPX_DISABLE_MANAGEMENT
98# define MPX_DISABLE_MANAGEMENT(a) (-EINVAL) 98# define MPX_DISABLE_MANAGEMENT(a) (-EINVAL)
99#endif 99#endif
100#ifndef GET_FP_MODE
101# define GET_FP_MODE(a) (-EINVAL)
102#endif
103#ifndef SET_FP_MODE
104# define SET_FP_MODE(a,b) (-EINVAL)
105#endif
100 106
101/* 107/*
102 * this is where the system-wide overflow UID and GID are defined, for 108 * this is where the system-wide overflow UID and GID are defined, for
@@ -1102,6 +1108,7 @@ DECLARE_RWSEM(uts_sem);
1102/* 1108/*
1103 * Work around broken programs that cannot handle "Linux 3.0". 1109 * Work around broken programs that cannot handle "Linux 3.0".
1104 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40 1110 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
1111 * And we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60.
1105 */ 1112 */
1106static int override_release(char __user *release, size_t len) 1113static int override_release(char __user *release, size_t len)
1107{ 1114{
@@ -1121,7 +1128,7 @@ static int override_release(char __user *release, size_t len)
1121 break; 1128 break;
1122 rest++; 1129 rest++;
1123 } 1130 }
1124 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40; 1131 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
1125 copy = clamp_t(size_t, len, 1, sizeof(buf)); 1132 copy = clamp_t(size_t, len, 1, sizeof(buf));
1126 copy = scnprintf(buf, copy, "2.6.%u%s", v, rest); 1133 copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
1127 ret = copy_to_user(release, buf, copy + 1); 1134 ret = copy_to_user(release, buf, copy + 1);
@@ -2219,6 +2226,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2219 return -EINVAL; 2226 return -EINVAL;
2220 error = MPX_DISABLE_MANAGEMENT(me); 2227 error = MPX_DISABLE_MANAGEMENT(me);
2221 break; 2228 break;
2229 case PR_SET_FP_MODE:
2230 error = SET_FP_MODE(me, arg2);
2231 break;
2232 case PR_GET_FP_MODE:
2233 error = GET_FP_MODE(me);
2234 break;
2222 default: 2235 default:
2223 error = -EINVAL; 2236 error = -EINVAL;
2224 break; 2237 break;
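
The #ifndef stubs follow the usual arch-hook pattern: an architecture that implements the new prctl operations defines GET_FP_MODE/SET_FP_MODE in its own headers, and every other architecture inherits the -EINVAL fallback. A minimal standalone model of the pattern; the EINVAL value is hardcoded purely for illustration.

#include <stdio.h>

#define EINVAL 22

#ifndef GET_FP_MODE                     /* no arch override: use the stub */
# define GET_FP_MODE(task) (-EINVAL)
#endif

int main(void)
{
        int me = 0;                     /* stand-in for the task pointer */

        printf("prctl(PR_GET_FP_MODE) -> %d\n", GET_FP_MODE(me));  /* -22 */
        return 0;
}
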
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 4b585e0fdd22..0f60b08a4f07 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -633,10 +633,14 @@ int ntp_validate_timex(struct timex *txc)
633 if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME))) 633 if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
634 return -EPERM; 634 return -EPERM;
635 635
636 if (txc->modes & ADJ_FREQUENCY) { 636 /*
637 if (LONG_MIN / PPM_SCALE > txc->freq) 637 * Check for potential multiplication overflows that can
638 * only happen on 64-bit systems:
639 */
640 if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
641 if (LLONG_MIN / PPM_SCALE > txc->freq)
638 return -EINVAL; 642 return -EINVAL;
639 if (LONG_MAX / PPM_SCALE < txc->freq) 643 if (LLONG_MAX / PPM_SCALE < txc->freq)
640 return -EINVAL; 644 return -EINVAL;
641 } 645 }
642 646
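
The ADJ_FREQUENCY hunk is the standard pre-multiplication range test: freq * PPM_SCALE overflows exactly when freq falls outside [LLONG_MIN/PPM_SCALE, LLONG_MAX/PPM_SCALE], and on 32-bit systems a long freq cannot reach that range at all. A standalone version of the same test; PPM_SCALE here is a stand-in value, not the kernel's constant.

#include <limits.h>
#include <stdio.h>

#define PPM_SCALE 65536LL

static int freq_would_overflow(long long freq)
{
        return LLONG_MIN / PPM_SCALE > freq ||
               LLONG_MAX / PPM_SCALE < freq;
}

int main(void)
{
        printf("%d\n", freq_would_overflow(LLONG_MAX / PPM_SCALE + 1)); /* 1 */
        printf("%d\n", freq_would_overflow(1000));                      /* 0 */
        return 0;
}
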
diff --git a/lib/Kconfig b/lib/Kconfig
index cb9758e0ba0c..87da53bb1fef 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -23,7 +23,7 @@ config HAVE_ARCH_BITREVERSE
23 have this capability. 23 have this capability.
24 24
25config RATIONAL 25config RATIONAL
26 boolean 26 bool
27 27
28config GENERIC_STRNCPY_FROM_USER 28config GENERIC_STRNCPY_FROM_USER
29 bool 29 bool
@@ -48,14 +48,14 @@ config GENERIC_IOMAP
48 select GENERIC_PCI_IOMAP 48 select GENERIC_PCI_IOMAP
49 49
50config GENERIC_IO 50config GENERIC_IO
51 boolean 51 bool
52 default n 52 default n
53 53
54config STMP_DEVICE 54config STMP_DEVICE
55 bool 55 bool
56 56
57config PERCPU_RWSEM 57config PERCPU_RWSEM
58 boolean 58 bool
59 59
60config ARCH_USE_CMPXCHG_LOCKREF 60config ARCH_USE_CMPXCHG_LOCKREF
61 bool 61 bool
@@ -266,7 +266,7 @@ config DECOMPRESS_LZ4
266# Generic allocator support is selected if needed 266# Generic allocator support is selected if needed
267# 267#
268config GENERIC_ALLOCATOR 268config GENERIC_ALLOCATOR
269 boolean 269 bool
270 270
271# 271#
272# reed solomon support is select'ed if needed 272# reed solomon support is select'ed if needed
@@ -275,16 +275,16 @@ config REED_SOLOMON
275 tristate 275 tristate
276 276
277config REED_SOLOMON_ENC8 277config REED_SOLOMON_ENC8
278 boolean 278 bool
279 279
280config REED_SOLOMON_DEC8 280config REED_SOLOMON_DEC8
281 boolean 281 bool
282 282
283config REED_SOLOMON_ENC16 283config REED_SOLOMON_ENC16
284 boolean 284 bool
285 285
286config REED_SOLOMON_DEC16 286config REED_SOLOMON_DEC16
287 boolean 287 bool
288 288
289# 289#
290# BCH support is selected if needed 290# BCH support is selected if needed
@@ -293,7 +293,7 @@ config BCH
293 tristate 293 tristate
294 294
295config BCH_CONST_PARAMS 295config BCH_CONST_PARAMS
296 boolean 296 bool
297 help 297 help
298 Drivers may select this option to force specific constant 298 Drivers may select this option to force specific constant
299 values for parameters 'm' (Galois field order) and 't' 299 values for parameters 'm' (Galois field order) and 't'
@@ -329,7 +329,7 @@ config BCH_CONST_T
329# Textsearch support is select'ed if needed 329# Textsearch support is select'ed if needed
330# 330#
331config TEXTSEARCH 331config TEXTSEARCH
332 boolean 332 bool
333 333
334config TEXTSEARCH_KMP 334config TEXTSEARCH_KMP
335 tristate 335 tristate
@@ -341,10 +341,10 @@ config TEXTSEARCH_FSM
341 tristate 341 tristate
342 342
343config BTREE 343config BTREE
344 boolean 344 bool
345 345
346config INTERVAL_TREE 346config INTERVAL_TREE
347 boolean 347 bool
348 help 348 help
349 Simple, embeddable, interval-tree. Can find the start of an 349 Simple, embeddable, interval-tree. Can find the start of an
350 overlapping range in log(n) time and then iterate over all 350 overlapping range in log(n) time and then iterate over all
@@ -372,18 +372,18 @@ config ASSOCIATIVE_ARRAY
372 for more information. 372 for more information.
373 373
374config HAS_IOMEM 374config HAS_IOMEM
375 boolean 375 bool
376 depends on !NO_IOMEM 376 depends on !NO_IOMEM
377 select GENERIC_IO 377 select GENERIC_IO
378 default y 378 default y
379 379
380config HAS_IOPORT_MAP 380config HAS_IOPORT_MAP
381 boolean 381 bool
382 depends on HAS_IOMEM && !NO_IOPORT_MAP 382 depends on HAS_IOMEM && !NO_IOPORT_MAP
383 default y 383 default y
384 384
385config HAS_DMA 385config HAS_DMA
386 boolean 386 bool
387 depends on !NO_DMA 387 depends on !NO_DMA
388 default y 388 default y
389 389
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
index 0d83ea8a9605..bcce5f149310 100644
--- a/lib/pci_iomap.c
+++ b/lib/pci_iomap.c
@@ -10,10 +10,11 @@
10 10
11#ifdef CONFIG_PCI 11#ifdef CONFIG_PCI
12/** 12/**
13 * pci_iomap - create a virtual mapping cookie for a PCI BAR 13 * pci_iomap_range - create a virtual mapping cookie for a PCI BAR
14 * @dev: PCI device that owns the BAR 14 * @dev: PCI device that owns the BAR
15 * @bar: BAR number 15 * @bar: BAR number
16 * @maxlen: length of the memory to map 16 * @offset: map memory at the given offset in BAR
17 * @maxlen: max length of the memory to map
17 * 18 *
18 * Using this function you will get a __iomem address to your device BAR. 19 * Using this function you will get a __iomem address to your device BAR.
19 * You can access it using ioread*() and iowrite*(). These functions hide 20 * You can access it using ioread*() and iowrite*(). These functions hide
@@ -21,16 +22,21 @@
21 * you expect from them in the correct way. 22 * you expect from them in the correct way.
22 * 23 *
23 * @maxlen specifies the maximum length to map. If you want to get access to 24 * @maxlen specifies the maximum length to map. If you want to get access to
24 * the complete BAR without checking for its length first, pass %0 here. 25 * the complete BAR from offset to the end, pass %0 here.
25 * */ 26 * */
26void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) 27void __iomem *pci_iomap_range(struct pci_dev *dev,
28 int bar,
29 unsigned long offset,
30 unsigned long maxlen)
27{ 31{
28 resource_size_t start = pci_resource_start(dev, bar); 32 resource_size_t start = pci_resource_start(dev, bar);
29 resource_size_t len = pci_resource_len(dev, bar); 33 resource_size_t len = pci_resource_len(dev, bar);
30 unsigned long flags = pci_resource_flags(dev, bar); 34 unsigned long flags = pci_resource_flags(dev, bar);
31 35
32 if (!len || !start) 36 if (len <= offset || !start)
33 return NULL; 37 return NULL;
38 len -= offset;
39 start += offset;
34 if (maxlen && len > maxlen) 40 if (maxlen && len > maxlen)
35 len = maxlen; 41 len = maxlen;
36 if (flags & IORESOURCE_IO) 42 if (flags & IORESOURCE_IO)
@@ -43,6 +49,25 @@ void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
43 /* What? */ 49 /* What? */
44 return NULL; 50 return NULL;
45} 51}
52EXPORT_SYMBOL(pci_iomap_range);
46 53
54/**
55 * pci_iomap - create a virtual mapping cookie for a PCI BAR
56 * @dev: PCI device that owns the BAR
57 * @bar: BAR number
58 * @maxlen: length of the memory to map
59 *
60 * Using this function you will get a __iomem address to your device BAR.
61 * You can access it using ioread*() and iowrite*(). These functions hide
62 * the details if this is a MMIO or PIO address space and will just do what
63 * you expect from them in the correct way.
64 *
65 * @maxlen specifies the maximum length to map. If you want to get access to
66 * the complete BAR without checking for its length first, pass %0 here.
67 * */
68void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
69{
70 return pci_iomap_range(dev, bar, 0, maxlen);
71}
47EXPORT_SYMBOL(pci_iomap); 72EXPORT_SYMBOL(pci_iomap);
48#endif /* CONFIG_PCI */ 73#endif /* CONFIG_PCI */
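
pci_iomap() is now the offset-0 special case of pci_iomap_range(), whose only new logic is the offset/length clamping. A userspace model of that clamping with hypothetical BAR geometry:

#include <stdio.h>

static unsigned long map_len(unsigned long bar_len, unsigned long offset,
                             unsigned long maxlen)
{
        if (bar_len <= offset)
                return 0;               /* offset at or past the BAR end */
        bar_len -= offset;
        if (maxlen && bar_len > maxlen)
                bar_len = maxlen;       /* caller asked for a window */
        return bar_len;
}

int main(void)
{
        printf("%#lx\n", map_len(0x8000, 0x4000, 0));           /* 0x4000 */
        printf("%#lx\n", map_len(0x8000, 0x4000, 0x1000));      /* 0x1000 */
        printf("%#lx\n", map_len(0x8000, 0x9000, 0));           /* 0 */
        return 0;
}
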
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 9cc4c4a90d00..b5344ef4c684 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -17,6 +17,7 @@
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/log2.h> 19#include <linux/log2.h>
20#include <linux/sched.h>
20#include <linux/slab.h> 21#include <linux/slab.h>
21#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
22#include <linux/mm.h> 23#include <linux/mm.h>
@@ -217,15 +218,15 @@ static void bucket_table_free(const struct bucket_table *tbl)
217static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, 218static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
218 size_t nbuckets) 219 size_t nbuckets)
219{ 220{
220 struct bucket_table *tbl; 221 struct bucket_table *tbl = NULL;
221 size_t size; 222 size_t size;
222 int i; 223 int i;
223 224
224 size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); 225 size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
225 tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); 226 if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
227 tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
226 if (tbl == NULL) 228 if (tbl == NULL)
227 tbl = vzalloc(size); 229 tbl = vzalloc(size);
228
229 if (tbl == NULL) 230 if (tbl == NULL)
230 return NULL; 231 return NULL;
231 232
@@ -247,26 +248,24 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
247 * @ht: hash table 248 * @ht: hash table
248 * @new_size: new table size 249 * @new_size: new table size
249 */ 250 */
250bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size) 251static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
251{ 252{
252 /* Expand table when exceeding 75% load */ 253 /* Expand table when exceeding 75% load */
253 return atomic_read(&ht->nelems) > (new_size / 4 * 3) && 254 return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
254 (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift); 255 (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
255} 256}
256EXPORT_SYMBOL_GPL(rht_grow_above_75);
257 257
258/** 258/**
259 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size 259 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
260 * @ht: hash table 260 * @ht: hash table
261 * @new_size: new table size 261 * @new_size: new table size
262 */ 262 */
263bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size) 263static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
264{ 264{
265 /* Shrink table beneath 30% load */ 265 /* Shrink table beneath 30% load */
266 return atomic_read(&ht->nelems) < (new_size * 3 / 10) && 266 return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
267 (atomic_read(&ht->shift) > ht->p.min_shift); 267 (atomic_read(&ht->shift) > ht->p.min_shift);
268} 268}
269EXPORT_SYMBOL_GPL(rht_shrink_below_30);
270 269
271static void lock_buckets(struct bucket_table *new_tbl, 270static void lock_buckets(struct bucket_table *new_tbl,
272 struct bucket_table *old_tbl, unsigned int hash) 271 struct bucket_table *old_tbl, unsigned int hash)
@@ -414,6 +413,7 @@ int rhashtable_expand(struct rhashtable *ht)
414 } 413 }
415 } 414 }
416 unlock_buckets(new_tbl, old_tbl, new_hash); 415 unlock_buckets(new_tbl, old_tbl, new_hash);
416 cond_resched();
417 } 417 }
418 418
419 /* Unzip interleaved hash chains */ 419 /* Unzip interleaved hash chains */
@@ -437,6 +437,7 @@ int rhashtable_expand(struct rhashtable *ht)
437 complete = false; 437 complete = false;
438 438
439 unlock_buckets(new_tbl, old_tbl, old_hash); 439 unlock_buckets(new_tbl, old_tbl, old_hash);
440 cond_resched();
440 } 441 }
441 } 442 }
442 443
@@ -495,6 +496,7 @@ int rhashtable_shrink(struct rhashtable *ht)
495 tbl->buckets[new_hash + new_tbl->size]); 496 tbl->buckets[new_hash + new_tbl->size]);
496 497
497 unlock_buckets(new_tbl, tbl, new_hash); 498 unlock_buckets(new_tbl, tbl, new_hash);
499 cond_resched();
498 } 500 }
499 501
500 /* Publish the new, valid hash table */ 502 /* Publish the new, valid hash table */
@@ -528,31 +530,19 @@ static void rht_deferred_worker(struct work_struct *work)
528 list_for_each_entry(walker, &ht->walkers, list) 530 list_for_each_entry(walker, &ht->walkers, list)
529 walker->resize = true; 531 walker->resize = true;
530 532
531 if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size)) 533 if (rht_grow_above_75(ht, tbl->size))
532 rhashtable_expand(ht); 534 rhashtable_expand(ht);
533 else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size)) 535 else if (rht_shrink_below_30(ht, tbl->size))
534 rhashtable_shrink(ht); 536 rhashtable_shrink(ht);
535
536unlock: 537unlock:
537 mutex_unlock(&ht->mutex); 538 mutex_unlock(&ht->mutex);
538} 539}
539 540
540static void rhashtable_wakeup_worker(struct rhashtable *ht)
541{
542 struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
543 struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
544 size_t size = tbl->size;
545
546 /* Only adjust the table if no resizing is currently in progress. */
547 if (tbl == new_tbl &&
548 ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
549 (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
550 schedule_work(&ht->run_work);
551}
552
553static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, 541static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
554 struct bucket_table *tbl, u32 hash) 542 struct bucket_table *tbl,
543 const struct bucket_table *old_tbl, u32 hash)
555{ 544{
545 bool no_resize_running = tbl == old_tbl;
556 struct rhash_head *head; 546 struct rhash_head *head;
557 547
558 hash = rht_bucket_index(tbl, hash); 548 hash = rht_bucket_index(tbl, hash);
@@ -568,8 +558,8 @@ static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
568 rcu_assign_pointer(tbl->buckets[hash], obj); 558 rcu_assign_pointer(tbl->buckets[hash], obj);
569 559
570 atomic_inc(&ht->nelems); 560 atomic_inc(&ht->nelems);
571 561 if (no_resize_running && rht_grow_above_75(ht, tbl->size))
572 rhashtable_wakeup_worker(ht); 562 schedule_work(&ht->run_work);
573} 563}
574 564
575/** 565/**
@@ -599,7 +589,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
599 hash = obj_raw_hashfn(ht, rht_obj(ht, obj)); 589 hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
600 590
601 lock_buckets(tbl, old_tbl, hash); 591 lock_buckets(tbl, old_tbl, hash);
602 __rhashtable_insert(ht, obj, tbl, hash); 592 __rhashtable_insert(ht, obj, tbl, old_tbl, hash);
603 unlock_buckets(tbl, old_tbl, hash); 593 unlock_buckets(tbl, old_tbl, hash);
604 594
605 rcu_read_unlock(); 595 rcu_read_unlock();
@@ -681,8 +671,11 @@ found:
681 unlock_buckets(new_tbl, old_tbl, new_hash); 671 unlock_buckets(new_tbl, old_tbl, new_hash);
682 672
683 if (ret) { 673 if (ret) {
674 bool no_resize_running = new_tbl == old_tbl;
675
684 atomic_dec(&ht->nelems); 676 atomic_dec(&ht->nelems);
685 rhashtable_wakeup_worker(ht); 677 if (no_resize_running && rht_shrink_below_30(ht, new_tbl->size))
678 schedule_work(&ht->run_work);
686 } 679 }
687 680
688 rcu_read_unlock(); 681 rcu_read_unlock();
@@ -852,7 +845,7 @@ bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
852 goto exit; 845 goto exit;
853 } 846 }
854 847
855 __rhashtable_insert(ht, obj, new_tbl, new_hash); 848 __rhashtable_insert(ht, obj, new_tbl, old_tbl, new_hash);
856 849
857exit: 850exit:
858 unlock_buckets(new_tbl, old_tbl, new_hash); 851 unlock_buckets(new_tbl, old_tbl, new_hash);
@@ -894,6 +887,9 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
894 if (!iter->walker) 887 if (!iter->walker)
895 return -ENOMEM; 888 return -ENOMEM;
896 889
890 INIT_LIST_HEAD(&iter->walker->list);
891 iter->walker->resize = false;
892
897 mutex_lock(&ht->mutex); 893 mutex_lock(&ht->mutex);
898 list_add(&iter->walker->list, &ht->walkers); 894 list_add(&iter->walker->list, &ht->walkers);
899 mutex_unlock(&ht->mutex); 895 mutex_unlock(&ht->mutex);
@@ -1111,8 +1107,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
1111 if (!ht->p.hash_rnd) 1107 if (!ht->p.hash_rnd)
1112 get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd)); 1108 get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
1113 1109
1114 if (ht->p.grow_decision || ht->p.shrink_decision) 1110 INIT_WORK(&ht->run_work, rht_deferred_worker);
1115 INIT_WORK(&ht->run_work, rht_deferred_worker);
1116 1111
1117 return 0; 1112 return 0;
1118} 1113}
@@ -1130,8 +1125,7 @@ void rhashtable_destroy(struct rhashtable *ht)
1130{ 1125{
1131 ht->being_destroyed = true; 1126 ht->being_destroyed = true;
1132 1127
1133 if (ht->p.grow_decision || ht->p.shrink_decision) 1128 cancel_work_sync(&ht->run_work);
1134 cancel_work_sync(&ht->run_work);
1135 1129
1136 mutex_lock(&ht->mutex); 1130 mutex_lock(&ht->mutex);
1137 bucket_table_free(rht_dereference(ht->tbl, ht)); 1131 bucket_table_free(rht_dereference(ht->tbl, ht));
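
With grow_decision/shrink_decision removed, resizing always uses the fixed 75%/30% watermarks above, which reduce to two integer comparisons. A standalone check of the arithmetic:

#include <stdio.h>

static int grow_above_75(unsigned int nelems, unsigned int size)
{
        return nelems > size / 4 * 3;           /* above 75% load: expand */
}

static int shrink_below_30(unsigned int nelems, unsigned int size)
{
        return nelems < size * 3 / 10;          /* below 30% load: shrink */
}

int main(void)
{
        printf("97/128: grow=%d shrink=%d\n",
               grow_above_75(97, 128), shrink_below_30(97, 128));  /* 1 0 */
        printf("37/128: grow=%d shrink=%d\n",
               grow_above_75(37, 128), shrink_below_30(37, 128));  /* 0 1 */
        return 0;
}
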
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 1dfeba73fc74..67c7593d1dd6 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -191,18 +191,18 @@ error:
191 return err; 191 return err;
192} 192}
193 193
194static struct rhashtable ht;
195
194static int __init test_rht_init(void) 196static int __init test_rht_init(void)
195{ 197{
196 struct rhashtable ht;
197 struct rhashtable_params params = { 198 struct rhashtable_params params = {
198 .nelem_hint = TEST_HT_SIZE, 199 .nelem_hint = TEST_HT_SIZE,
199 .head_offset = offsetof(struct test_obj, node), 200 .head_offset = offsetof(struct test_obj, node),
200 .key_offset = offsetof(struct test_obj, value), 201 .key_offset = offsetof(struct test_obj, value),
201 .key_len = sizeof(int), 202 .key_len = sizeof(int),
202 .hashfn = jhash, 203 .hashfn = jhash,
204 .max_shift = 1, /* we expand/shrink manually here */
203 .nulls_base = (3U << RHT_BASE_SHIFT), 205 .nulls_base = (3U << RHT_BASE_SHIFT),
204 .grow_decision = rht_grow_above_75,
205 .shrink_decision = rht_shrink_below_30,
206 }; 206 };
207 int err; 207 int err;
208 208
@@ -222,6 +222,11 @@ static int __init test_rht_init(void)
222 return err; 222 return err;
223} 223}
224 224
225static void __exit test_rht_exit(void)
226{
227}
228
225module_init(test_rht_init); 229module_init(test_rht_init);
230module_exit(test_rht_exit);
226 231
227MODULE_LICENSE("GPL v2"); 232MODULE_LICENSE("GPL v2");
diff --git a/mm/Kconfig b/mm/Kconfig
index de5239c152f9..a03131b6ba8e 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -129,28 +129,28 @@ config SPARSEMEM_VMEMMAP
129 efficient option when sufficient kernel resources are available. 129 efficient option when sufficient kernel resources are available.
130 130
131config HAVE_MEMBLOCK 131config HAVE_MEMBLOCK
132 boolean 132 bool
133 133
134config HAVE_MEMBLOCK_NODE_MAP 134config HAVE_MEMBLOCK_NODE_MAP
135 boolean 135 bool
136 136
137config HAVE_MEMBLOCK_PHYS_MAP 137config HAVE_MEMBLOCK_PHYS_MAP
138 boolean 138 bool
139 139
140config HAVE_GENERIC_RCU_GUP 140config HAVE_GENERIC_RCU_GUP
141 boolean 141 bool
142 142
143config ARCH_DISCARD_MEMBLOCK 143config ARCH_DISCARD_MEMBLOCK
144 boolean 144 bool
145 145
146config NO_BOOTMEM 146config NO_BOOTMEM
147 boolean 147 bool
148 148
149config MEMORY_ISOLATION 149config MEMORY_ISOLATION
150 boolean 150 bool
151 151
152config MOVABLE_NODE 152config MOVABLE_NODE
153 boolean "Enable to assign a node which has only movable memory" 153 bool "Enable to assign a node which has only movable memory"
154 depends on HAVE_MEMBLOCK 154 depends on HAVE_MEMBLOCK
155 depends on NO_BOOTMEM 155 depends on NO_BOOTMEM
156 depends on X86_64 156 depends on X86_64
@@ -228,12 +228,12 @@ config SPLIT_PTLOCK_CPUS
228 default "4" 228 default "4"
229 229
230config ARCH_ENABLE_SPLIT_PMD_PTLOCK 230config ARCH_ENABLE_SPLIT_PMD_PTLOCK
231 boolean 231 bool
232 232
233# 233#
234# support for memory balloon 234# support for memory balloon
235config MEMORY_BALLOON 235config MEMORY_BALLOON
236 boolean 236 bool
237 237
238# 238#
239# support for memory balloon compaction 239# support for memory balloon compaction
@@ -276,7 +276,7 @@ config MIGRATION
276 allocation instead of reclaiming. 276 allocation instead of reclaiming.
277 277
278config ARCH_ENABLE_HUGEPAGE_MIGRATION 278config ARCH_ENABLE_HUGEPAGE_MIGRATION
279 boolean 279 bool
280 280
281config PHYS_ADDR_T_64BIT 281config PHYS_ADDR_T_64BIT
282 def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT 282 def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d18d3a6e7337..9fe07692eaad 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5247,7 +5247,7 @@ static int memory_low_show(struct seq_file *m, void *v)
5247 unsigned long low = ACCESS_ONCE(memcg->low); 5247 unsigned long low = ACCESS_ONCE(memcg->low);
5248 5248
5249 if (low == PAGE_COUNTER_MAX) 5249 if (low == PAGE_COUNTER_MAX)
5250 seq_puts(m, "infinity\n"); 5250 seq_puts(m, "max\n");
5251 else 5251 else
5252 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE); 5252 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
5253 5253
@@ -5262,7 +5262,7 @@ static ssize_t memory_low_write(struct kernfs_open_file *of,
5262 int err; 5262 int err;
5263 5263
5264 buf = strstrip(buf); 5264 buf = strstrip(buf);
5265 err = page_counter_memparse(buf, "infinity", &low); 5265 err = page_counter_memparse(buf, "max", &low);
5266 if (err) 5266 if (err)
5267 return err; 5267 return err;
5268 5268
@@ -5277,7 +5277,7 @@ static int memory_high_show(struct seq_file *m, void *v)
5277 unsigned long high = ACCESS_ONCE(memcg->high); 5277 unsigned long high = ACCESS_ONCE(memcg->high);
5278 5278
5279 if (high == PAGE_COUNTER_MAX) 5279 if (high == PAGE_COUNTER_MAX)
5280 seq_puts(m, "infinity\n"); 5280 seq_puts(m, "max\n");
5281 else 5281 else
5282 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE); 5282 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
5283 5283
@@ -5292,7 +5292,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
5292 int err; 5292 int err;
5293 5293
5294 buf = strstrip(buf); 5294 buf = strstrip(buf);
5295 err = page_counter_memparse(buf, "infinity", &high); 5295 err = page_counter_memparse(buf, "max", &high);
5296 if (err) 5296 if (err)
5297 return err; 5297 return err;
5298 5298
@@ -5307,7 +5307,7 @@ static int memory_max_show(struct seq_file *m, void *v)
5307 unsigned long max = ACCESS_ONCE(memcg->memory.limit); 5307 unsigned long max = ACCESS_ONCE(memcg->memory.limit);
5308 5308
5309 if (max == PAGE_COUNTER_MAX) 5309 if (max == PAGE_COUNTER_MAX)
5310 seq_puts(m, "infinity\n"); 5310 seq_puts(m, "max\n");
5311 else 5311 else
5312 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE); 5312 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5313 5313
@@ -5322,7 +5322,7 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
5322 int err; 5322 int err;
5323 5323
5324 buf = strstrip(buf); 5324 buf = strstrip(buf);
5325 err = page_counter_memparse(buf, "infinity", &max); 5325 err = page_counter_memparse(buf, "max", &max);
5326 if (err) 5326 if (err)
5327 return err; 5327 return err;
5328 5328
@@ -5426,7 +5426,7 @@ bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
5426 if (memcg == root_mem_cgroup) 5426 if (memcg == root_mem_cgroup)
5427 return false; 5427 return false;
5428 5428
5429 if (page_counter_read(&memcg->memory) > memcg->low) 5429 if (page_counter_read(&memcg->memory) >= memcg->low)
5430 return false; 5430 return false;
5431 5431
5432 while (memcg != root) { 5432 while (memcg != root) {
@@ -5435,7 +5435,7 @@ bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
5435 if (memcg == root_mem_cgroup) 5435 if (memcg == root_mem_cgroup)
5436 break; 5436 break;
5437 5437
5438 if (page_counter_read(&memcg->memory) > memcg->low) 5438 if (page_counter_read(&memcg->memory) >= memcg->low)
5439 return false; 5439 return false;
5440 } 5440 }
5441 return true; 5441 return true;
diff --git a/mm/nommu.c b/mm/nommu.c
index 7296360fc057..3e67e7538ecf 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1213,11 +1213,9 @@ static int do_mmap_private(struct vm_area_struct *vma,
1213 if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) { 1213 if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
1214 total = point; 1214 total = point;
1215 kdebug("try to alloc exact %lu pages", total); 1215 kdebug("try to alloc exact %lu pages", total);
1216 base = alloc_pages_exact(len, GFP_KERNEL);
1217 } else {
1218 base = (void *)__get_free_pages(GFP_KERNEL, order);
1219 } 1216 }
1220 1217
1218 base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
1221 if (!base) 1219 if (!base)
1222 goto enomem; 1220 goto enomem;
1223 1221
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a47f0b229a1a..7abfa70cdc1a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2353,8 +2353,15 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
2353 if (ac->high_zoneidx < ZONE_NORMAL) 2353 if (ac->high_zoneidx < ZONE_NORMAL)
2354 goto out; 2354 goto out;
2355 /* The OOM killer does not compensate for light reclaim */ 2355 /* The OOM killer does not compensate for light reclaim */
2356 if (!(gfp_mask & __GFP_FS)) 2356 if (!(gfp_mask & __GFP_FS)) {
2357 /*
2358 * XXX: Page reclaim didn't yield anything,
2359 * and the OOM killer can't be invoked, but
2360 * keep looping as per should_alloc_retry().
2361 */
2362 *did_some_progress = 1;
2357 goto out; 2363 goto out;
2364 }
2358 /* 2365 /*
2359 * GFP_THISNODE contains __GFP_NORETRY and we never hit this. 2366 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
2360 * Sanity check for bare calls of __GFP_THISNODE, not real OOM. 2367 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
diff --git a/mm/shmem.c b/mm/shmem.c
index a63031fa3e0c..cf2d0ca010bc 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1455,6 +1455,9 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
1455 1455
1456bool shmem_mapping(struct address_space *mapping) 1456bool shmem_mapping(struct address_space *mapping)
1457{ 1457{
1458 if (!mapping->host)
1459 return false;
1460
1458 return mapping->host->i_sb->s_op == &shmem_ops; 1461 return mapping->host->i_sb->s_op == &shmem_ops;
1459} 1462}
1460 1463
@@ -2319,8 +2322,8 @@ static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
2319 2322
2320static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) 2323static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
2321{ 2324{
2322 bool old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode); 2325 bool old_is_dir = d_is_dir(old_dentry);
2323 bool new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode); 2326 bool new_is_dir = d_is_dir(new_dentry);
2324 2327
2325 if (old_dir != new_dir && old_is_dir != new_is_dir) { 2328 if (old_dir != new_dir && old_is_dir != new_is_dir) {
2326 if (old_is_dir) { 2329 if (old_is_dir) {
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index daa749c8b3fb..d8e376a5f0f1 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -524,6 +524,12 @@ static int p9_virtio_probe(struct virtio_device *vdev)
524 int err; 524 int err;
525 struct virtio_chan *chan; 525 struct virtio_chan *chan;
526 526
527 if (!vdev->config->get) {
528 dev_err(&vdev->dev, "%s failure: config access disabled\n",
529 __func__);
530 return -EINVAL;
531 }
532
527 chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL); 533 chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL);
528 if (!chan) { 534 if (!chan) {
529 pr_err("Failed to allocate virtio 9P channel\n"); 535 pr_err("Failed to allocate virtio 9P channel\n");
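
The new guard refuses to bind when the transport provides no config-space accessor; the 9p driver reads its mount tag from config space, so without the check a device lacking ->get() would later lead to a NULL function call. The defensive shape of the check, in a runnable miniature (all names hypothetical):

    /* Refuse to probe when a required ops hook is absent. */
    #include <stdio.h>

    struct config_ops { int (*get)(int offset, void *buf, int len); };
    struct device     { struct config_ops *cfg; };

    static int probe(struct device *dev)
    {
        if (!dev->cfg || !dev->cfg->get) {
            fprintf(stderr, "probe failure: config access disabled\n");
            return -1;   /* mirrors the driver returning -EINVAL */
        }
        return 0;
    }

    int main(void)
    {
        struct device d = { .cfg = NULL };
        return probe(&d) ? 1 : 0;
    }
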
diff --git a/net/Kconfig b/net/Kconfig
index ff9ffc17fa0e..44dd5786ee91 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -231,18 +231,18 @@ source "net/hsr/Kconfig"
231source "net/switchdev/Kconfig" 231source "net/switchdev/Kconfig"
232 232
233config RPS 233config RPS
234 boolean 234 bool
235 depends on SMP && SYSFS 235 depends on SMP && SYSFS
236 default y 236 default y
237 237
238config RFS_ACCEL 238config RFS_ACCEL
239 boolean 239 bool
240 depends on RPS 240 depends on RPS
241 select CPU_RMAP 241 select CPU_RMAP
242 default y 242 default y
243 243
244config XPS 244config XPS
245 boolean 245 bool
246 depends on SMP 246 depends on SMP
247 default y 247 default y
248 248
@@ -254,18 +254,18 @@ config CGROUP_NET_PRIO
254 a per-interface basis. 254 a per-interface basis.
255 255
256config CGROUP_NET_CLASSID 256config CGROUP_NET_CLASSID
257 boolean "Network classid cgroup" 257 bool "Network classid cgroup"
258 depends on CGROUPS 258 depends on CGROUPS
259 ---help--- 259 ---help---
260 Cgroup subsystem for use as general purpose socket classid marker that is 260 Cgroup subsystem for use as general purpose socket classid marker that is
261 being used in cls_cgroup and for netfilter matching. 261 being used in cls_cgroup and for netfilter matching.
262 262
263config NET_RX_BUSY_POLL 263config NET_RX_BUSY_POLL
264 boolean 264 bool
265 default y 265 default y
266 266
267config BQL 267config BQL
268 boolean 268 bool
269 depends on SYSFS 269 depends on SYSFS
270 select DQL 270 select DQL
271 default y 271 default y
@@ -282,7 +282,7 @@ config BPF_JIT
282 this feature changing /proc/sys/net/core/bpf_jit_enable 282 this feature changing /proc/sys/net/core/bpf_jit_enable
283 283
284config NET_FLOW_LIMIT 284config NET_FLOW_LIMIT
285 boolean 285 bool
286 depends on RPS 286 depends on RPS
287 default y 287 default y
288 ---help--- 288 ---help---
diff --git a/net/bridge/br.c b/net/bridge/br.c
index fb57ab6b24f9..02c24cf63c34 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -190,6 +190,8 @@ static int __init br_init(void)
190{ 190{
191 int err; 191 int err;
192 192
193 BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
194
193 err = stp_proto_register(&br_stp_proto); 195 err = stp_proto_register(&br_stp_proto);
194 if (err < 0) { 196 if (err < 0) {
195 pr_err("bridge: can't register sap for STP\n"); 197 pr_err("bridge: can't register sap for STP\n");
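
The added BUILD_BUG_ON turns the "bridge control block must fit inside skb->cb" assumption into a compile-time failure instead of silent corruption; FIELD_SIZEOF(t, f) is simply sizeof the member. A userspace equivalent using C11 _Static_assert (struct contents are stand-ins, not the real layouts):

    #include <stddef.h>

    struct sk_buff_like { char cb[48]; };
    struct br_cb_like   { unsigned long frag_max_size; int igmp; int flags; };

    #define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

    _Static_assert(sizeof(struct br_cb_like) <=
                   FIELD_SIZEOF(struct sk_buff_like, cb),
                   "control block must fit in skb->cb");

    int main(void) { return 0; }
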
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index 8bc7caa28e64..434ba8557826 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -84,7 +84,7 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
84 u16 tmp; 84 u16 tmp;
85 u16 len; 85 u16 len;
86 u16 hdrchks; 86 u16 hdrchks;
87 u16 pktchks; 87 int pktchks;
88 struct cffrml *this; 88 struct cffrml *this;
89 this = container_obj(layr); 89 this = container_obj(layr);
90 90
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 1be0b521ac49..f6c3b2137eea 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -255,9 +255,9 @@ inline u16 cfpkt_getlen(struct cfpkt *pkt)
255 return skb->len; 255 return skb->len;
256} 256}
257 257
258inline u16 cfpkt_iterate(struct cfpkt *pkt, 258int cfpkt_iterate(struct cfpkt *pkt,
259 u16 (*iter_func)(u16, void *, u16), 259 u16 (*iter_func)(u16, void *, u16),
260 u16 data) 260 u16 data)
261{ 261{
262 /* 262 /*
263 * Don't care about the performance hit of linearizing, 263 * Don't care about the performance hit of linearizing,
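
These two caif hunks are one fix: cfpkt_iterate() can return a negative error, but with a u16 return type (and a u16 `pktchks` in the caller) any error degenerated into a large positive value that could never be recognized as negative. A quick demonstration of why a u16 cannot carry an error code:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int err = -22;                       /* -EINVAL */
        uint16_t truncated = (uint16_t)err;
        printf("as int: %d, as u16: %u, (u16 < 0)? %d\n",
               err, truncated, truncated < 0 ? 1 : 0);
        /* prints: as int: -22, as u16: 65514, (u16 < 0)? 0 */
        return 0;
    }
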
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 5d5ab67f516d..ec565508e904 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -239,6 +239,8 @@ enum {
239 Opt_nocrc, 239 Opt_nocrc,
240 Opt_cephx_require_signatures, 240 Opt_cephx_require_signatures,
241 Opt_nocephx_require_signatures, 241 Opt_nocephx_require_signatures,
242 Opt_tcp_nodelay,
243 Opt_notcp_nodelay,
242}; 244};
243 245
244static match_table_t opt_tokens = { 246static match_table_t opt_tokens = {
@@ -259,6 +261,8 @@ static match_table_t opt_tokens = {
259 {Opt_nocrc, "nocrc"}, 261 {Opt_nocrc, "nocrc"},
260 {Opt_cephx_require_signatures, "cephx_require_signatures"}, 262 {Opt_cephx_require_signatures, "cephx_require_signatures"},
261 {Opt_nocephx_require_signatures, "nocephx_require_signatures"}, 263 {Opt_nocephx_require_signatures, "nocephx_require_signatures"},
264 {Opt_tcp_nodelay, "tcp_nodelay"},
265 {Opt_notcp_nodelay, "notcp_nodelay"},
262 {-1, NULL} 266 {-1, NULL}
263}; 267};
264 268
@@ -457,6 +461,7 @@ ceph_parse_options(char *options, const char *dev_name,
457 case Opt_nocrc: 461 case Opt_nocrc:
458 opt->flags |= CEPH_OPT_NOCRC; 462 opt->flags |= CEPH_OPT_NOCRC;
459 break; 463 break;
464
460 case Opt_cephx_require_signatures: 465 case Opt_cephx_require_signatures:
461 opt->flags &= ~CEPH_OPT_NOMSGAUTH; 466 opt->flags &= ~CEPH_OPT_NOMSGAUTH;
462 break; 467 break;
@@ -464,6 +469,13 @@ ceph_parse_options(char *options, const char *dev_name,
464 opt->flags |= CEPH_OPT_NOMSGAUTH; 469 opt->flags |= CEPH_OPT_NOMSGAUTH;
465 break; 470 break;
466 471
472 case Opt_tcp_nodelay:
473 opt->flags |= CEPH_OPT_TCP_NODELAY;
474 break;
475 case Opt_notcp_nodelay:
476 opt->flags &= ~CEPH_OPT_TCP_NODELAY;
477 break;
478
467 default: 479 default:
468 BUG_ON(token); 480 BUG_ON(token);
469 } 481 }
@@ -518,10 +530,12 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
518 /* msgr */ 530 /* msgr */
519 if (ceph_test_opt(client, MYIP)) 531 if (ceph_test_opt(client, MYIP))
520 myaddr = &client->options->my_addr; 532 myaddr = &client->options->my_addr;
533
521 ceph_messenger_init(&client->msgr, myaddr, 534 ceph_messenger_init(&client->msgr, myaddr,
522 client->supported_features, 535 client->supported_features,
523 client->required_features, 536 client->required_features,
524 ceph_test_opt(client, NOCRC)); 537 ceph_test_opt(client, NOCRC),
538 ceph_test_opt(client, TCP_NODELAY));
525 539
526 /* subsystems */ 540 /* subsystems */
527 err = ceph_monc_init(&client->monc, client); 541 err = ceph_monc_init(&client->monc, client);
diff --git a/net/ceph/ceph_strings.c b/net/ceph/ceph_strings.c
index 30560202f57b..139a9cb19b0c 100644
--- a/net/ceph/ceph_strings.c
+++ b/net/ceph/ceph_strings.c
@@ -42,17 +42,3 @@ const char *ceph_osd_state_name(int s)
42 return "???"; 42 return "???";
43 } 43 }
44} 44}
45
46const char *ceph_pool_op_name(int op)
47{
48 switch (op) {
49 case POOL_OP_CREATE: return "create";
50 case POOL_OP_DELETE: return "delete";
51 case POOL_OP_AUID_CHANGE: return "auid change";
52 case POOL_OP_CREATE_SNAP: return "create snap";
53 case POOL_OP_DELETE_SNAP: return "delete snap";
54 case POOL_OP_CREATE_UNMANAGED_SNAP: return "create unmanaged snap";
55 case POOL_OP_DELETE_UNMANAGED_SNAP: return "delete unmanaged snap";
56 }
57 return "???";
58}
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index d2d525529f87..14d9995097cc 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -127,8 +127,6 @@ static int monc_show(struct seq_file *s, void *p)
127 op = le16_to_cpu(req->request->hdr.type); 127 op = le16_to_cpu(req->request->hdr.type);
128 if (op == CEPH_MSG_STATFS) 128 if (op == CEPH_MSG_STATFS)
129 seq_printf(s, "%llu statfs\n", req->tid); 129 seq_printf(s, "%llu statfs\n", req->tid);
130 else if (op == CEPH_MSG_POOLOP)
131 seq_printf(s, "%llu poolop\n", req->tid);
132 else if (op == CEPH_MSG_MON_GET_VERSION) 130 else if (op == CEPH_MSG_MON_GET_VERSION)
133 seq_printf(s, "%llu mon_get_version", req->tid); 131 seq_printf(s, "%llu mon_get_version", req->tid);
134 else 132 else
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 33a2f201e460..6b3f54ed65ba 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -510,6 +510,16 @@ static int ceph_tcp_connect(struct ceph_connection *con)
510 return ret; 510 return ret;
511 } 511 }
512 512
513 if (con->msgr->tcp_nodelay) {
514 int optval = 1;
515
516 ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
517 (char *)&optval, sizeof(optval));
518 if (ret)
519 pr_err("kernel_setsockopt(TCP_NODELAY) failed: %d",
520 ret);
521 }
522
513 sk_set_memalloc(sock->sk); 523 sk_set_memalloc(sock->sk);
514 524
515 con->sock = sock; 525 con->sock = sock;
@@ -2922,7 +2932,8 @@ void ceph_messenger_init(struct ceph_messenger *msgr,
2922 struct ceph_entity_addr *myaddr, 2932 struct ceph_entity_addr *myaddr,
2923 u64 supported_features, 2933 u64 supported_features,
2924 u64 required_features, 2934 u64 required_features,
2925 bool nocrc) 2935 bool nocrc,
2936 bool tcp_nodelay)
2926{ 2937{
2927 msgr->supported_features = supported_features; 2938 msgr->supported_features = supported_features;
2928 msgr->required_features = required_features; 2939 msgr->required_features = required_features;
@@ -2937,6 +2948,7 @@ void ceph_messenger_init(struct ceph_messenger *msgr,
2937 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce)); 2948 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
2938 encode_my_addr(msgr); 2949 encode_my_addr(msgr);
2939 msgr->nocrc = nocrc; 2950 msgr->nocrc = nocrc;
2951 msgr->tcp_nodelay = tcp_nodelay;
2940 2952
2941 atomic_set(&msgr->stopping, 0); 2953 atomic_set(&msgr->stopping, 0);
2942 2954
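
The new messenger option disables Nagle's algorithm on the ceph socket, trading batching for latency. kernel_setsockopt() is the in-kernel counterpart of the ordinary userspace call, which looks like this (runnable sketch, error handling kept non-fatal as in the driver):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int optval = 1;

        if (fd < 0)
            return 1;
        if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY,
                       &optval, sizeof(optval)) < 0)
            perror("setsockopt(TCP_NODELAY)");
        return 0;
    }
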
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index f2148e22b148..2b3cf05e87b0 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -410,7 +410,7 @@ out_unlocked:
410} 410}
411 411
412/* 412/*
413 * generic requests (e.g., statfs, poolop) 413 * generic requests (currently statfs, mon_get_version)
414 */ 414 */
415static struct ceph_mon_generic_request *__lookup_generic_req( 415static struct ceph_mon_generic_request *__lookup_generic_req(
416 struct ceph_mon_client *monc, u64 tid) 416 struct ceph_mon_client *monc, u64 tid)
@@ -569,7 +569,7 @@ static void handle_statfs_reply(struct ceph_mon_client *monc,
569 return; 569 return;
570 570
571bad: 571bad:
572 pr_err("corrupt generic reply, tid %llu\n", tid); 572 pr_err("corrupt statfs reply, tid %llu\n", tid);
573 ceph_msg_dump(msg); 573 ceph_msg_dump(msg);
574} 574}
575 575
@@ -588,7 +588,6 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
588 588
589 kref_init(&req->kref); 589 kref_init(&req->kref);
590 req->buf = buf; 590 req->buf = buf;
591 req->buf_len = sizeof(*buf);
592 init_completion(&req->completion); 591 init_completion(&req->completion);
593 592
594 err = -ENOMEM; 593 err = -ENOMEM;
@@ -611,7 +610,7 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
611 err = do_generic_request(monc, req); 610 err = do_generic_request(monc, req);
612 611
613out: 612out:
614 kref_put(&req->kref, release_generic_request); 613 put_generic_request(req);
615 return err; 614 return err;
616} 615}
617EXPORT_SYMBOL(ceph_monc_do_statfs); 616EXPORT_SYMBOL(ceph_monc_do_statfs);
@@ -647,7 +646,7 @@ static void handle_get_version_reply(struct ceph_mon_client *monc,
647 646
648 return; 647 return;
649bad: 648bad:
650 pr_err("corrupt mon_get_version reply\n"); 649 pr_err("corrupt mon_get_version reply, tid %llu\n", tid);
651 ceph_msg_dump(msg); 650 ceph_msg_dump(msg);
652} 651}
653 652
@@ -670,7 +669,6 @@ int ceph_monc_do_get_version(struct ceph_mon_client *monc, const char *what,
670 669
671 kref_init(&req->kref); 670 kref_init(&req->kref);
672 req->buf = newest; 671 req->buf = newest;
673 req->buf_len = sizeof(*newest);
674 init_completion(&req->completion); 672 init_completion(&req->completion);
675 673
676 req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION, 674 req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION,
@@ -701,134 +699,12 @@ int ceph_monc_do_get_version(struct ceph_mon_client *monc, const char *what,
701 699
702 mutex_unlock(&monc->mutex); 700 mutex_unlock(&monc->mutex);
703out: 701out:
704 kref_put(&req->kref, release_generic_request); 702 put_generic_request(req);
705 return err; 703 return err;
706} 704}
707EXPORT_SYMBOL(ceph_monc_do_get_version); 705EXPORT_SYMBOL(ceph_monc_do_get_version);
708 706
709/* 707/*
710 * pool ops
711 */
712static int get_poolop_reply_buf(const char *src, size_t src_len,
713 char *dst, size_t dst_len)
714{
715 u32 buf_len;
716
717 if (src_len != sizeof(u32) + dst_len)
718 return -EINVAL;
719
720 buf_len = le32_to_cpu(*(__le32 *)src);
721 if (buf_len != dst_len)
722 return -EINVAL;
723
724 memcpy(dst, src + sizeof(u32), dst_len);
725 return 0;
726}
727
728static void handle_poolop_reply(struct ceph_mon_client *monc,
729 struct ceph_msg *msg)
730{
731 struct ceph_mon_generic_request *req;
732 struct ceph_mon_poolop_reply *reply = msg->front.iov_base;
733 u64 tid = le64_to_cpu(msg->hdr.tid);
734
735 if (msg->front.iov_len < sizeof(*reply))
736 goto bad;
737 dout("handle_poolop_reply %p tid %llu\n", msg, tid);
738
739 mutex_lock(&monc->mutex);
740 req = __lookup_generic_req(monc, tid);
741 if (req) {
742 if (req->buf_len &&
743 get_poolop_reply_buf(msg->front.iov_base + sizeof(*reply),
744 msg->front.iov_len - sizeof(*reply),
745 req->buf, req->buf_len) < 0) {
746 mutex_unlock(&monc->mutex);
747 goto bad;
748 }
749 req->result = le32_to_cpu(reply->reply_code);
750 get_generic_request(req);
751 }
752 mutex_unlock(&monc->mutex);
753 if (req) {
754 complete(&req->completion);
755 put_generic_request(req);
756 }
757 return;
758
759bad:
760 pr_err("corrupt generic reply, tid %llu\n", tid);
761 ceph_msg_dump(msg);
762}
763
764/*
765 * Do a synchronous pool op.
766 */
767static int do_poolop(struct ceph_mon_client *monc, u32 op,
768 u32 pool, u64 snapid,
769 char *buf, int len)
770{
771 struct ceph_mon_generic_request *req;
772 struct ceph_mon_poolop *h;
773 int err;
774
775 req = kzalloc(sizeof(*req), GFP_NOFS);
776 if (!req)
777 return -ENOMEM;
778
779 kref_init(&req->kref);
780 req->buf = buf;
781 req->buf_len = len;
782 init_completion(&req->completion);
783
784 err = -ENOMEM;
785 req->request = ceph_msg_new(CEPH_MSG_POOLOP, sizeof(*h), GFP_NOFS,
786 true);
787 if (!req->request)
788 goto out;
789 req->reply = ceph_msg_new(CEPH_MSG_POOLOP_REPLY, 1024, GFP_NOFS,
790 true);
791 if (!req->reply)
792 goto out;
793
794 /* fill out request */
795 req->request->hdr.version = cpu_to_le16(2);
796 h = req->request->front.iov_base;
797 h->monhdr.have_version = 0;
798 h->monhdr.session_mon = cpu_to_le16(-1);
799 h->monhdr.session_mon_tid = 0;
800 h->fsid = monc->monmap->fsid;
801 h->pool = cpu_to_le32(pool);
802 h->op = cpu_to_le32(op);
803 h->auid = 0;
804 h->snapid = cpu_to_le64(snapid);
805 h->name_len = 0;
806
807 err = do_generic_request(monc, req);
808
809out:
810 kref_put(&req->kref, release_generic_request);
811 return err;
812}
813
814int ceph_monc_create_snapid(struct ceph_mon_client *monc,
815 u32 pool, u64 *snapid)
816{
817 return do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP,
818 pool, 0, (char *)snapid, sizeof(*snapid));
819
820}
821EXPORT_SYMBOL(ceph_monc_create_snapid);
822
823int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
824 u32 pool, u64 snapid)
825{
826 return do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP,
827 pool, snapid, NULL, 0);
828
829}
830
831/*
832 * Resend pending generic requests. 708 * Resend pending generic requests.
833 */ 709 */
834static void __resend_generic_request(struct ceph_mon_client *monc) 710static void __resend_generic_request(struct ceph_mon_client *monc)
@@ -1112,10 +988,6 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
1112 handle_get_version_reply(monc, msg); 988 handle_get_version_reply(monc, msg);
1113 break; 989 break;
1114 990
1115 case CEPH_MSG_POOLOP_REPLY:
1116 handle_poolop_reply(monc, msg);
1117 break;
1118
1119 case CEPH_MSG_MON_MAP: 991 case CEPH_MSG_MON_MAP:
1120 ceph_monc_handle_map(monc, msg); 992 ceph_monc_handle_map(monc, msg);
1121 break; 993 break;
@@ -1154,7 +1026,6 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
1154 case CEPH_MSG_MON_SUBSCRIBE_ACK: 1026 case CEPH_MSG_MON_SUBSCRIBE_ACK:
1155 m = ceph_msg_get(monc->m_subscribe_ack); 1027 m = ceph_msg_get(monc->m_subscribe_ack);
1156 break; 1028 break;
1157 case CEPH_MSG_POOLOP_REPLY:
1158 case CEPH_MSG_STATFS_REPLY: 1029 case CEPH_MSG_STATFS_REPLY:
1159 return get_generic_reply(con, hdr, skip); 1030 return get_generic_reply(con, hdr, skip);
1160 case CEPH_MSG_AUTH_REPLY: 1031 case CEPH_MSG_AUTH_REPLY:
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 53299c7b0ca4..41a4abc7e98e 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1035,10 +1035,11 @@ static void put_osd(struct ceph_osd *osd)
1035{ 1035{
1036 dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref), 1036 dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
1037 atomic_read(&osd->o_ref) - 1); 1037 atomic_read(&osd->o_ref) - 1);
1038 if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) { 1038 if (atomic_dec_and_test(&osd->o_ref)) {
1039 struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth; 1039 struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
1040 1040
1041 ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer); 1041 if (osd->o_auth.authorizer)
1042 ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
1042 kfree(osd); 1043 kfree(osd);
1043 } 1044 }
1044} 1045}
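
The put_osd() rewrite fixes a leak: in the old code the authorizer test was part of the refcount condition, so when the last reference dropped on an osd whose o_auth.authorizer was NULL, kfree() was never reached. The bug pattern in miniature (runnable, names hypothetical):

    /* Tying cleanup to an unrelated test leaks the object. */
    #include <stdio.h>
    #include <stdlib.h>

    struct obj { int refs; void *extra; };

    static void put_buggy(struct obj *o)
    {
        if (--o->refs == 0 && o->extra) {  /* leaks o when extra == NULL */
            free(o->extra);
            free(o);
        }
    }

    static void put_fixed(struct obj *o)
    {
        if (--o->refs == 0) {
            free(o->extra);                /* free(NULL) is a no-op */
            free(o);
        }
    }

    int main(void)
    {
        struct obj *o = calloc(1, sizeof(*o));
        o->refs = 1;
        put_fixed(o);                      /* freed even without ->extra */
        (void)put_buggy;
        return 0;
    }
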
@@ -1048,14 +1049,24 @@ static void put_osd(struct ceph_osd *osd)
1048 */ 1049 */
1049static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) 1050static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
1050{ 1051{
1051 dout("__remove_osd %p\n", osd); 1052 dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
1052 WARN_ON(!list_empty(&osd->o_requests)); 1053 WARN_ON(!list_empty(&osd->o_requests));
1053 WARN_ON(!list_empty(&osd->o_linger_requests)); 1054 WARN_ON(!list_empty(&osd->o_linger_requests));
1054 1055
1055 rb_erase(&osd->o_node, &osdc->osds);
1056 list_del_init(&osd->o_osd_lru); 1056 list_del_init(&osd->o_osd_lru);
1057 ceph_con_close(&osd->o_con); 1057 rb_erase(&osd->o_node, &osdc->osds);
1058 put_osd(osd); 1058 RB_CLEAR_NODE(&osd->o_node);
1059}
1060
1061static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
1062{
1063 dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
1064
1065 if (!RB_EMPTY_NODE(&osd->o_node)) {
1066 ceph_con_close(&osd->o_con);
1067 __remove_osd(osdc, osd);
1068 put_osd(osd);
1069 }
1059} 1070}
1060 1071
1061static void remove_all_osds(struct ceph_osd_client *osdc) 1072static void remove_all_osds(struct ceph_osd_client *osdc)
@@ -1065,7 +1076,7 @@ static void remove_all_osds(struct ceph_osd_client *osdc)
1065 while (!RB_EMPTY_ROOT(&osdc->osds)) { 1076 while (!RB_EMPTY_ROOT(&osdc->osds)) {
1066 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), 1077 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
1067 struct ceph_osd, o_node); 1078 struct ceph_osd, o_node);
1068 __remove_osd(osdc, osd); 1079 remove_osd(osdc, osd);
1069 } 1080 }
1070 mutex_unlock(&osdc->request_mutex); 1081 mutex_unlock(&osdc->request_mutex);
1071} 1082}
@@ -1106,7 +1117,7 @@ static void remove_old_osds(struct ceph_osd_client *osdc)
1106 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { 1117 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
1107 if (time_before(jiffies, osd->lru_ttl)) 1118 if (time_before(jiffies, osd->lru_ttl))
1108 break; 1119 break;
1109 __remove_osd(osdc, osd); 1120 remove_osd(osdc, osd);
1110 } 1121 }
1111 mutex_unlock(&osdc->request_mutex); 1122 mutex_unlock(&osdc->request_mutex);
1112} 1123}
@@ -1121,8 +1132,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
1121 dout("__reset_osd %p osd%d\n", osd, osd->o_osd); 1132 dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
1122 if (list_empty(&osd->o_requests) && 1133 if (list_empty(&osd->o_requests) &&
1123 list_empty(&osd->o_linger_requests)) { 1134 list_empty(&osd->o_linger_requests)) {
1124 __remove_osd(osdc, osd); 1135 remove_osd(osdc, osd);
1125
1126 return -ENODEV; 1136 return -ENODEV;
1127 } 1137 }
1128 1138
@@ -1926,6 +1936,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
1926{ 1936{
1927 struct rb_node *p, *n; 1937 struct rb_node *p, *n;
1928 1938
1939 dout("%s %p\n", __func__, osdc);
1929 for (p = rb_first(&osdc->osds); p; p = n) { 1940 for (p = rb_first(&osdc->osds); p; p = n) {
1930 struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node); 1941 struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);
1931 1942
diff --git a/net/compat.c b/net/compat.c
index 49c6a8fb9f09..478443182bbe 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -711,24 +711,18 @@ static unsigned char nas[21] = {
711 711
712COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags) 712COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
713{ 713{
714 if (flags & MSG_CMSG_COMPAT)
715 return -EINVAL;
716 return __sys_sendmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT); 714 return __sys_sendmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
717} 715}
718 716
719COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg, 717COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
720 unsigned int, vlen, unsigned int, flags) 718 unsigned int, vlen, unsigned int, flags)
721{ 719{
722 if (flags & MSG_CMSG_COMPAT)
723 return -EINVAL;
724 return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, 720 return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
725 flags | MSG_CMSG_COMPAT); 721 flags | MSG_CMSG_COMPAT);
726} 722}
727 723
728COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags) 724COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
729{ 725{
730 if (flags & MSG_CMSG_COMPAT)
731 return -EINVAL;
732 return __sys_recvmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT); 726 return __sys_recvmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
733} 727}
734 728
@@ -751,9 +745,6 @@ COMPAT_SYSCALL_DEFINE5(recvmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
751 int datagrams; 745 int datagrams;
752 struct timespec ktspec; 746 struct timespec ktspec;
753 747
754 if (flags & MSG_CMSG_COMPAT)
755 return -EINVAL;
756
757 if (timeout == NULL) 748 if (timeout == NULL)
758 return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, 749 return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
759 flags | MSG_CMSG_COMPAT, NULL); 750 flags | MSG_CMSG_COMPAT, NULL);
diff --git a/net/core/dev.c b/net/core/dev.c
index 8f9710c62e20..962ee9d71964 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -946,7 +946,7 @@ bool dev_valid_name(const char *name)
946 return false; 946 return false;
947 947
948 while (*name) { 948 while (*name) {
949 if (*name == '/' || isspace(*name)) 949 if (*name == '/' || *name == ':' || isspace(*name))
950 return false; 950 return false;
951 name++; 951 name++;
952 } 952 }
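
':' joins '/' and whitespace on the reject list because several userspace interfaces (the /proc/net/dev format, ifconfig alias syntax) treat ':' as a field separator in interface names. A standalone mirror of the validator for experimentation:

    #include <ctype.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool dev_valid_name_like(const char *name)
    {
        if (*name == '\0')
            return false;
        while (*name) {
            if (*name == '/' || *name == ':' ||
                isspace((unsigned char)*name))
                return false;
            name++;
        }
        return true;
    }

    int main(void)
    {
        printf("%d %d\n", dev_valid_name_like("eth0"),
               dev_valid_name_like("eth0:1"));   /* 1 0 */
        return 0;
    }
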
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index eb0c3ace7458..1d00b8922902 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -98,6 +98,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
98 [NETIF_F_RXALL_BIT] = "rx-all", 98 [NETIF_F_RXALL_BIT] = "rx-all",
99 [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload", 99 [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
100 [NETIF_F_BUSY_POLL_BIT] = "busy-poll", 100 [NETIF_F_BUSY_POLL_BIT] = "busy-poll",
101 [NETIF_F_HW_SWITCH_OFFLOAD_BIT] = "hw-switch-offload",
101}; 102};
102 103
103static const char 104static const char
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 0c08062d1796..1e2f46a69d50 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -32,6 +32,9 @@ gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
32 return 0; 32 return 0;
33 33
34nla_put_failure: 34nla_put_failure:
35 kfree(d->xstats);
36 d->xstats = NULL;
37 d->xstats_len = 0;
35 spin_unlock_bh(d->lock); 38 spin_unlock_bh(d->lock);
36 return -1; 39 return -1;
37} 40}
@@ -305,7 +308,9 @@ int
305gnet_stats_copy_app(struct gnet_dump *d, void *st, int len) 308gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
306{ 309{
307 if (d->compat_xstats) { 310 if (d->compat_xstats) {
308 d->xstats = st; 311 d->xstats = kmemdup(st, len, GFP_ATOMIC);
312 if (!d->xstats)
313 goto err_out;
309 d->xstats_len = len; 314 d->xstats_len = len;
310 } 315 }
311 316
@@ -313,6 +318,11 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
313 return gnet_stats_copy(d, TCA_STATS_APP, st, len); 318 return gnet_stats_copy(d, TCA_STATS_APP, st, len);
314 319
315 return 0; 320 return 0;
321
322err_out:
323 d->xstats_len = 0;
324 spin_unlock_bh(d->lock);
325 return -1;
316} 326}
317EXPORT_SYMBOL(gnet_stats_copy_app); 327EXPORT_SYMBOL(gnet_stats_copy_app);
318 328
@@ -345,6 +355,9 @@ gnet_stats_finish_copy(struct gnet_dump *d)
345 return -1; 355 return -1;
346 } 356 }
347 357
358 kfree(d->xstats);
359 d->xstats = NULL;
360 d->xstats_len = 0;
348 spin_unlock_bh(d->lock); 361 spin_unlock_bh(d->lock);
349 return 0; 362 return 0;
350} 363}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index b4899f5b7388..508155b283dd 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1134,6 +1134,9 @@ static ssize_t pktgen_if_write(struct file *file,
1134 return len; 1134 return len;
1135 1135
1136 i += len; 1136 i += len;
1137 if ((value > 1) &&
1138 (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
1139 return -ENOTSUPP;
1137 pkt_dev->burst = value < 1 ? 1 : value; 1140 pkt_dev->burst = value < 1 ? 1 : value;
1138 sprintf(pg_result, "OK: burst=%d", pkt_dev->burst); 1141 sprintf(pg_result, "OK: burst=%d", pkt_dev->burst);
1139 return count; 1142 return count;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index ab293a3066b3..25b4b5d23485 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1300,7 +1300,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1300 s_h = cb->args[0]; 1300 s_h = cb->args[0];
1301 s_idx = cb->args[1]; 1301 s_idx = cb->args[1];
1302 1302
1303 rcu_read_lock();
1304 cb->seq = net->dev_base_seq; 1303 cb->seq = net->dev_base_seq;
1305 1304
1306 /* A hack to preserve kernel<->userspace interface. 1305 /* A hack to preserve kernel<->userspace interface.
@@ -1322,7 +1321,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1322 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 1321 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1323 idx = 0; 1322 idx = 0;
1324 head = &net->dev_index_head[h]; 1323 head = &net->dev_index_head[h];
1325 hlist_for_each_entry_rcu(dev, head, index_hlist) { 1324 hlist_for_each_entry(dev, head, index_hlist) {
1326 if (idx < s_idx) 1325 if (idx < s_idx)
1327 goto cont; 1326 goto cont;
1328 err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, 1327 err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
@@ -1344,7 +1343,6 @@ cont:
1344 } 1343 }
1345 } 1344 }
1346out: 1345out:
1347 rcu_read_unlock();
1348 cb->args[1] = idx; 1346 cb->args[1] = idx;
1349 cb->args[0] = h; 1347 cb->args[0] = h;
1350 1348
@@ -2012,8 +2010,8 @@ replay:
2012 } 2010 }
2013 2011
2014 if (1) { 2012 if (1) {
2015 struct nlattr *attr[ops ? ops->maxtype + 1 : 0]; 2013 struct nlattr *attr[ops ? ops->maxtype + 1 : 1];
2016 struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 0]; 2014 struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 1];
2017 struct nlattr **data = NULL; 2015 struct nlattr **data = NULL;
2018 struct nlattr **slave_data = NULL; 2016 struct nlattr **slave_data = NULL;
2019 struct net *dest_net, *link_net = NULL; 2017 struct net *dest_net, *link_net = NULL;
@@ -2122,6 +2120,10 @@ replay:
2122 if (IS_ERR(dest_net)) 2120 if (IS_ERR(dest_net))
2123 return PTR_ERR(dest_net); 2121 return PTR_ERR(dest_net);
2124 2122
2123 err = -EPERM;
2124 if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
2125 goto out;
2126
2125 if (tb[IFLA_LINK_NETNSID]) { 2127 if (tb[IFLA_LINK_NETNSID]) {
2126 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]); 2128 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
2127 2129
@@ -2130,6 +2132,9 @@ replay:
2130 err = -EINVAL; 2132 err = -EINVAL;
2131 goto out; 2133 goto out;
2132 } 2134 }
2135 err = -EPERM;
2136 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
2137 goto out;
2133 } 2138 }
2134 2139
2135 dev = rtnl_create_link(link_net ? : dest_net, ifname, 2140 dev = rtnl_create_link(link_net ? : dest_net, ifname,
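
Besides the namespace capability checks, note the `: 0` to `: 1` change on the attr arrays: a variable-length array whose size evaluates to zero is undefined behavior in C (C99 6.7.5.2 requires the size to be greater than zero), so the fallback bound must be at least 1. In miniature:

    /* Zero-sized VLAs are undefined; clamp the bound to at least 1. */
    #include <stdio.h>

    static void demo(int maxtype)
    {
        int n = maxtype ? maxtype + 1 : 1;   /* never 0 */
        int slots[n];
        for (int i = 0; i < n; i++)
            slots[i] = 0;
        printf("allocated %d slot(s)\n", n);
    }

    int main(void)
    {
        demo(0);
        demo(7);
        return 0;
    }
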
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 374e43bc6b80..913b94a77060 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3620,13 +3620,14 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
3620{ 3620{
3621 struct sk_buff_head *q = &sk->sk_error_queue; 3621 struct sk_buff_head *q = &sk->sk_error_queue;
3622 struct sk_buff *skb, *skb_next; 3622 struct sk_buff *skb, *skb_next;
3623 unsigned long flags;
3623 int err = 0; 3624 int err = 0;
3624 3625
3625 spin_lock_bh(&q->lock); 3626 spin_lock_irqsave(&q->lock, flags);
3626 skb = __skb_dequeue(q); 3627 skb = __skb_dequeue(q);
3627 if (skb && (skb_next = skb_peek(q))) 3628 if (skb && (skb_next = skb_peek(q)))
3628 err = SKB_EXT_ERR(skb_next)->ee.ee_errno; 3629 err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
3629 spin_unlock_bh(&q->lock); 3630 spin_unlock_irqrestore(&q->lock, flags);
3630 3631
3631 sk->sk_err = err; 3632 sk->sk_err = err;
3632 if (err) 3633 if (err)
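
Switching from the _bh to the irqsave lock variant indicates the error-queue lock can also be taken from hard-interrupt context (for example by drivers queueing tx timestamps); spin_lock_bh() only masks softirqs, so a hard IRQ on the same CPU contending for q->lock would deadlock. The pattern, as a kernel-style fragment for illustration only (not a standalone program):

    static struct sk_buff *dequeue_err(struct sk_buff_head *q)
    {
        struct sk_buff *skb;
        unsigned long flags;

        /* irqsave masks local interrupts and restores the previous
         * state on unlock, so it is safe in any context. */
        spin_lock_irqsave(&q->lock, flags);
        skb = __skb_dequeue(q);
        spin_unlock_irqrestore(&q->lock, flags);
        return skb;
    }
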
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 1d7c1256e845..3b81092771f8 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1062,7 +1062,7 @@ source_ok:
1062 if (decnet_debug_level & 16) 1062 if (decnet_debug_level & 16)
1063 printk(KERN_DEBUG 1063 printk(KERN_DEBUG
1064 "dn_route_output_slow: initial checks complete." 1064 "dn_route_output_slow: initial checks complete."
1065 " dst=%o4x src=%04x oif=%d try_hard=%d\n", 1065 " dst=%04x src=%04x oif=%d try_hard=%d\n",
1066 le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr), 1066 le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr),
1067 fld.flowidn_oif, try_hard); 1067 fld.flowidn_oif, try_hard);
1068 1068
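
A one-character format-string fix worth spelling out: "%o4x" prints the value in octal followed by the literal characters "4x", while "%04x" is the intended zero-padded four-digit hex. Compare:

    #include <stdio.h>

    int main(void)
    {
        unsigned addr = 0x3e8;
        printf("%o4x\n", addr);   /* "17504x": octal 1750 plus literal "4x" */
        printf("%04x\n", addr);   /* "03e8":   intended output */
        return 0;
    }
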
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index a138d75751df..44d27469ae55 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -359,8 +359,11 @@ static void hsr_dev_destroy(struct net_device *hsr_dev)
359 struct hsr_port *port; 359 struct hsr_port *port;
360 360
361 hsr = netdev_priv(hsr_dev); 361 hsr = netdev_priv(hsr_dev);
362
363 rtnl_lock();
362 hsr_for_each_port(hsr, port) 364 hsr_for_each_port(hsr, port)
363 hsr_del_port(port); 365 hsr_del_port(port);
366 rtnl_unlock();
364 367
365 del_timer_sync(&hsr->prune_timer); 368 del_timer_sync(&hsr->prune_timer);
366 del_timer_sync(&hsr->announce_timer); 369 del_timer_sync(&hsr->announce_timer);
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
index 779d28b65417..cd37d0011b42 100644
--- a/net/hsr/hsr_main.c
+++ b/net/hsr/hsr_main.c
@@ -36,6 +36,10 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
36 return NOTIFY_DONE; /* Not an HSR device */ 36 return NOTIFY_DONE; /* Not an HSR device */
37 hsr = netdev_priv(dev); 37 hsr = netdev_priv(dev);
38 port = hsr_port_get_hsr(hsr, HSR_PT_MASTER); 38 port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
39 if (port == NULL) {
40 /* Resend of notification concerning removed device? */
41 return NOTIFY_DONE;
42 }
39 } else { 43 } else {
40 hsr = port->hsr; 44 hsr = port->hsr;
41 } 45 }
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
index a348dcbcd683..7d37366cc695 100644
--- a/net/hsr/hsr_slave.c
+++ b/net/hsr/hsr_slave.c
@@ -181,8 +181,10 @@ void hsr_del_port(struct hsr_port *port)
181 list_del_rcu(&port->port_list); 181 list_del_rcu(&port->port_list);
182 182
183 if (port != master) { 183 if (port != master) {
184 netdev_update_features(master->dev); 184 if (master != NULL) {
185 dev_set_mtu(master->dev, hsr_get_max_mtu(hsr)); 185 netdev_update_features(master->dev);
186 dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
187 }
186 netdev_rx_handler_unregister(port->dev); 188 netdev_rx_handler_unregister(port->dev);
187 dev_set_promiscuity(port->dev, -1); 189 dev_set_promiscuity(port->dev, -1);
188 } 190 }
@@ -192,5 +194,7 @@ void hsr_del_port(struct hsr_port *port)
192 */ 194 */
193 195
194 synchronize_rcu(); 196 synchronize_rcu();
195 dev_put(port->dev); 197
198 if (port != master)
199 dev_put(port->dev);
196} 200}
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index e5b6d0ddcb58..2c8d98e728c0 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -664,7 +664,7 @@ struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
664 if (skb->protocol != htons(ETH_P_IP)) 664 if (skb->protocol != htons(ETH_P_IP))
665 return skb; 665 return skb;
666 666
667 if (!skb_copy_bits(skb, 0, &iph, sizeof(iph))) 667 if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0)
668 return skb; 668 return skb;
669 669
670 if (iph.ihl < 5 || iph.version != 4) 670 if (iph.ihl < 5 || iph.version != 4)
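
skb_copy_bits() returns 0 on success and a negative errno on failure, so the old `!skb_copy_bits(...)` bailed out on success, meaning ip_check_defrag() never examined the header it had just copied. The inverted-check pitfall with 0-on-success APIs, in a runnable miniature:

    #include <stdio.h>

    static int copy_bits(int ok) { return ok ? 0 : -14; /* -EFAULT */ }

    int main(void)
    {
        /* buggy: treats success (0) as the early-return case */
        if (!copy_bits(1))
            printf("buggy check: bailed out on success\n");

        /* fixed: bail out only on a real failure */
        if (copy_bits(1) < 0)
            printf("never printed on success\n");
        return 0;
    }
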
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index d68199d9b2b0..a7aea2048a0d 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -888,7 +888,8 @@ static int __ip_append_data(struct sock *sk,
888 cork->length += length; 888 cork->length += length;
889 if (((length > mtu) || (skb && skb_is_gso(skb))) && 889 if (((length > mtu) || (skb && skb_is_gso(skb))) &&
890 (sk->sk_protocol == IPPROTO_UDP) && 890 (sk->sk_protocol == IPPROTO_UDP) &&
891 (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) { 891 (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
892 (sk->sk_type == SOCK_DGRAM)) {
892 err = ip_ufo_append_data(sk, queue, getfrag, from, length, 893 err = ip_ufo_append_data(sk, queue, getfrag, from, length,
893 hh_len, fragheaderlen, transhdrlen, 894 hh_len, fragheaderlen, transhdrlen,
894 maxfraglen, flags); 895 maxfraglen, flags);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8fdd27b17306..fb4cf8b8e121 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4770,7 +4770,7 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
4770 return false; 4770 return false;
4771 4771
4772 /* If we filled the congestion window, do not expand. */ 4772 /* If we filled the congestion window, do not expand. */
4773 if (tp->packets_out >= tp->snd_cwnd) 4773 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
4774 return false; 4774 return false;
4775 4775
4776 return true; 4776 return true;
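
packets_out counts every segment sent and not yet fully acked, including ones already marked sacked or lost, so testing it against snd_cwnd stays pessimistic after losses; tcp_packets_in_flight() subtracts sacked+lost and adds retransmits, which is the quantity the congestion window actually bounds. In miniature (field names follow struct tcp_sock, heavily simplified):

    #include <stdio.h>

    struct tp { unsigned packets_out, sacked_out, lost_out, retrans_out; };

    static unsigned packets_in_flight(const struct tp *tp)
    {
        return tp->packets_out - (tp->sacked_out + tp->lost_out)
               + tp->retrans_out;
    }

    int main(void)
    {
        struct tp tp = { .packets_out = 10, .sacked_out = 3,
                         .lost_out = 2, .retrans_out = 1 };
        printf("out=%u in_flight=%u\n", tp.packets_out,
               packets_in_flight(&tp));   /* out=10 in_flight=6 */
        return 0;
    }
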
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 783bccfcc060..88d2cf0cae52 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4935,6 +4935,21 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
4935 return ret; 4935 return ret;
4936} 4936}
4937 4937
4938static
4939int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
4940 void __user *buffer, size_t *lenp, loff_t *ppos)
4941{
4942 struct inet6_dev *idev = ctl->extra1;
4943 int min_mtu = IPV6_MIN_MTU;
4944 struct ctl_table lctl;
4945
4946 lctl = *ctl;
4947 lctl.extra1 = &min_mtu;
4948 lctl.extra2 = idev ? &idev->dev->mtu : NULL;
4949
4950 return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
4951}
4952
4938static void dev_disable_change(struct inet6_dev *idev) 4953static void dev_disable_change(struct inet6_dev *idev)
4939{ 4954{
4940 struct netdev_notifier_info info; 4955 struct netdev_notifier_info info;
@@ -5086,7 +5101,7 @@ static struct addrconf_sysctl_table
5086 .data = &ipv6_devconf.mtu6, 5101 .data = &ipv6_devconf.mtu6,
5087 .maxlen = sizeof(int), 5102 .maxlen = sizeof(int),
5088 .mode = 0644, 5103 .mode = 0644,
5089 .proc_handler = proc_dointvec, 5104 .proc_handler = addrconf_sysctl_mtu,
5090 }, 5105 },
5091 { 5106 {
5092 .procname = "accept_ra", 5107 .procname = "accept_ra",
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 7deebf102cba..0a04a37305d5 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1298,7 +1298,8 @@ emsgsize:
1298 if (((length > mtu) || 1298 if (((length > mtu) ||
1299 (skb && skb_is_gso(skb))) && 1299 (skb && skb_is_gso(skb))) &&
1300 (sk->sk_protocol == IPPROTO_UDP) && 1300 (sk->sk_protocol == IPPROTO_UDP) &&
1301 (rt->dst.dev->features & NETIF_F_UFO)) { 1301 (rt->dst.dev->features & NETIF_F_UFO) &&
1302 (sk->sk_type == SOCK_DGRAM)) {
1302 err = ip6_ufo_append_data(sk, queue, getfrag, from, length, 1303 err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
1303 hh_len, fragheaderlen, 1304 hh_len, fragheaderlen,
1304 transhdrlen, mtu, flags, rt); 1305 transhdrlen, mtu, flags, rt);
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 40695b9751c1..9940a41efca1 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -811,7 +811,7 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
811 break; 811 break;
812 } 812 }
813 spin_unlock_irqrestore(&self->spinlock, flags); 813 spin_unlock_irqrestore(&self->spinlock, flags);
814 current->state = TASK_RUNNING; 814 __set_current_state(TASK_RUNNING);
815} 815}
816 816
817/* 817/*
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 3c83a1e5ab03..1215693fdd22 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -305,7 +305,7 @@ irnet_ctrl_read(irnet_socket * ap,
305 305
306 /* Put ourselves on the wait queue to be woken up */ 306 /* Put ourselves on the wait queue to be woken up */
307 add_wait_queue(&irnet_events.rwait, &wait); 307 add_wait_queue(&irnet_events.rwait, &wait);
308 current->state = TASK_INTERRUPTIBLE; 308 set_current_state(TASK_INTERRUPTIBLE);
309 for(;;) 309 for(;;)
310 { 310 {
311 /* If there is unread events */ 311 /* If there is unread events */
@@ -321,7 +321,7 @@ irnet_ctrl_read(irnet_socket * ap,
321 /* Yield and wait to be woken up */ 321 /* Yield and wait to be woken up */
322 schedule(); 322 schedule();
323 } 323 }
324 current->state = TASK_RUNNING; 324 __set_current_state(TASK_RUNNING);
325 remove_wait_queue(&irnet_events.rwait, &wait); 325 remove_wait_queue(&irnet_events.rwait, &wait);
326 326
 327 /* Did we get it? */ 327 /* Did we get it? */
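
The irda conversions matter for correctness, not just style: set_current_state() inserts a memory barrier between writing the task state and the subsequent condition test, closing the missed-wakeup window that a bare `current->state = ...` assignment leaves open; __set_current_state() is reserved for spots where no race remains, such as restoring TASK_RUNNING after the loop. The canonical sleep/wakeup pattern, as a kernel-style fragment for illustration only:

    add_wait_queue(&wq, &wait);
    for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);  /* barrier vs. condition */
        if (condition)
            break;
        schedule();
    }
    __set_current_state(TASK_RUNNING);          /* no waker can race here */
    remove_wait_queue(&wq, &wait);
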
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index ff0d2db09df9..5bcd4e5589d3 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -1508,6 +1508,8 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
1508 if (ieee80211_chanctx_refcount(local, ctx) == 0) 1508 if (ieee80211_chanctx_refcount(local, ctx) == 0)
1509 ieee80211_free_chanctx(local, ctx); 1509 ieee80211_free_chanctx(local, ctx);
1510 1510
1511 sdata->radar_required = false;
1512
1511 /* Unreserving may ready an in-place reservation. */ 1513 /* Unreserving may ready an in-place reservation. */
1512 if (use_reserved_switch) 1514 if (use_reserved_switch)
1513 ieee80211_vif_use_reserved_switch(local); 1515 ieee80211_vif_use_reserved_switch(local);
@@ -1566,6 +1568,9 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
1566 ieee80211_recalc_smps_chanctx(local, ctx); 1568 ieee80211_recalc_smps_chanctx(local, ctx);
1567 ieee80211_recalc_radar_chanctx(local, ctx); 1569 ieee80211_recalc_radar_chanctx(local, ctx);
1568 out: 1570 out:
1571 if (ret)
1572 sdata->radar_required = false;
1573
1569 mutex_unlock(&local->chanctx_mtx); 1574 mutex_unlock(&local->chanctx_mtx);
1570 return ret; 1575 return ret;
1571} 1576}
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 7c86a002df95..ef6e8a6c4253 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -373,7 +373,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
373 rate++; 373 rate++;
374 mi->sample_deferred++; 374 mi->sample_deferred++;
375 } else { 375 } else {
376 if (!msr->sample_limit != 0) 376 if (!msr->sample_limit)
377 return; 377 return;
378 378
379 mi->sample_packets++; 379 mi->sample_packets++;
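
This minstrel change is a readability fix, not a behavior change: `!` binds tighter than `!=`, so `!msr->sample_limit != 0` already parsed as `(!msr->sample_limit) != 0`, which has the same truth value as the replacement. Demonstration:

    #include <stdio.h>

    int main(void)
    {
        int limit = 0;
        printf("%d %d\n", !limit != 0, !limit);  /* both 1: same parse */
        limit = 5;
        printf("%d %d\n", !limit != 0, !limit);  /* both 0 */
        return 0;
    }
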
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 88a18ffe2975..07bd8db00af8 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -566,6 +566,7 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
566 if (tx->sdata->control_port_no_encrypt) 566 if (tx->sdata->control_port_no_encrypt)
567 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 567 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
568 info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO; 568 info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
569 info->flags |= IEEE80211_TX_CTL_USE_MINRATE;
569 } 570 }
570 571
571 return TX_CONTINUE; 572 return TX_CONTINUE;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 76cc9ffd87fa..49532672f66d 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3466,7 +3466,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
3466 if (udest.af == 0) 3466 if (udest.af == 0)
3467 udest.af = svc->af; 3467 udest.af = svc->af;
3468 3468
3469 if (udest.af != svc->af) { 3469 if (udest.af != svc->af && cmd != IPVS_CMD_DEL_DEST) {
3470 /* The synchronization protocol is incompatible 3470 /* The synchronization protocol is incompatible
3471 * with mixed family services 3471 * with mixed family services
3472 */ 3472 */
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index a990df2f3f71..29fbcf25f88f 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -634,8 +634,12 @@ nft_match_select_ops(const struct nft_ctx *ctx,
634 struct xt_match *match = nft_match->ops.data; 634 struct xt_match *match = nft_match->ops.data;
635 635
636 if (strcmp(match->name, mt_name) == 0 && 636 if (strcmp(match->name, mt_name) == 0 &&
637 match->revision == rev && match->family == family) 637 match->revision == rev && match->family == family) {
638 if (!try_module_get(match->me))
639 return ERR_PTR(-ENOENT);
640
638 return &nft_match->ops; 641 return &nft_match->ops;
642 }
639 } 643 }
640 644
641 match = xt_request_find_match(family, mt_name, rev); 645 match = xt_request_find_match(family, mt_name, rev);
@@ -704,8 +708,12 @@ nft_target_select_ops(const struct nft_ctx *ctx,
704 struct xt_target *target = nft_target->ops.data; 708 struct xt_target *target = nft_target->ops.data;
705 709
706 if (strcmp(target->name, tg_name) == 0 && 710 if (strcmp(target->name, tg_name) == 0 &&
707 target->revision == rev && target->family == family) 711 target->revision == rev && target->family == family) {
712 if (!try_module_get(target->me))
713 return ERR_PTR(-ENOENT);
714
708 return &nft_target->ops; 715 return &nft_target->ops;
716 }
709 } 717 }
710 718
711 target = xt_request_find_target(family, tg_name, rev); 719 target = xt_request_find_target(family, tg_name, rev);
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 61e6c407476a..c82df0a48fcd 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -192,8 +192,6 @@ static int nft_hash_init(const struct nft_set *set,
192 .key_offset = offsetof(struct nft_hash_elem, key), 192 .key_offset = offsetof(struct nft_hash_elem, key),
193 .key_len = set->klen, 193 .key_len = set->klen,
194 .hashfn = jhash, 194 .hashfn = jhash,
195 .grow_decision = rht_grow_above_75,
196 .shrink_decision = rht_shrink_below_30,
197 }; 195 };
198 196
199 return rhashtable_init(priv, &params); 197 return rhashtable_init(priv, &params);
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 30dbe34915ae..45e1b30e4fb2 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -378,12 +378,11 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
378 mutex_lock(&recent_mutex); 378 mutex_lock(&recent_mutex);
379 t = recent_table_lookup(recent_net, info->name); 379 t = recent_table_lookup(recent_net, info->name);
380 if (t != NULL) { 380 if (t != NULL) {
381 if (info->hit_count > t->nstamps_max_mask) { 381 if (nstamp_mask > t->nstamps_max_mask) {
382 pr_info("hitcount (%u) is larger than packets to be remembered (%u) for table %s\n", 382 spin_lock_bh(&recent_lock);
383 info->hit_count, t->nstamps_max_mask + 1, 383 recent_table_flush(t);
384 info->name); 384 t->nstamps_max_mask = nstamp_mask;
385 ret = -EINVAL; 385 spin_unlock_bh(&recent_lock);
386 goto out;
387 } 386 }
388 387
389 t->refcnt++; 388 t->refcnt++;
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 1ba67931eb1b..13332dbf291d 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -243,12 +243,13 @@ static int
243extract_icmp6_fields(const struct sk_buff *skb, 243extract_icmp6_fields(const struct sk_buff *skb,
244 unsigned int outside_hdrlen, 244 unsigned int outside_hdrlen,
245 int *protocol, 245 int *protocol,
246 struct in6_addr **raddr, 246 const struct in6_addr **raddr,
247 struct in6_addr **laddr, 247 const struct in6_addr **laddr,
248 __be16 *rport, 248 __be16 *rport,
249 __be16 *lport) 249 __be16 *lport,
250 struct ipv6hdr *ipv6_var)
250{ 251{
251 struct ipv6hdr *inside_iph, _inside_iph; 252 const struct ipv6hdr *inside_iph;
252 struct icmp6hdr *icmph, _icmph; 253 struct icmp6hdr *icmph, _icmph;
253 __be16 *ports, _ports[2]; 254 __be16 *ports, _ports[2];
254 u8 inside_nexthdr; 255 u8 inside_nexthdr;
@@ -263,12 +264,14 @@ extract_icmp6_fields(const struct sk_buff *skb,
263 if (icmph->icmp6_type & ICMPV6_INFOMSG_MASK) 264 if (icmph->icmp6_type & ICMPV6_INFOMSG_MASK)
264 return 1; 265 return 1;
265 266
266 inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph), sizeof(_inside_iph), &_inside_iph); 267 inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph),
268 sizeof(*ipv6_var), ipv6_var);
267 if (inside_iph == NULL) 269 if (inside_iph == NULL)
268 return 1; 270 return 1;
269 inside_nexthdr = inside_iph->nexthdr; 271 inside_nexthdr = inside_iph->nexthdr;
270 272
271 inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph), 273 inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) +
274 sizeof(*ipv6_var),
272 &inside_nexthdr, &inside_fragoff); 275 &inside_nexthdr, &inside_fragoff);
273 if (inside_hdrlen < 0) 276 if (inside_hdrlen < 0)
274 return 1; /* hjm: Packet has no/incomplete transport layer headers. */ 277 return 1; /* hjm: Packet has no/incomplete transport layer headers. */
@@ -315,10 +318,10 @@ xt_socket_get_sock_v6(struct net *net, const u8 protocol,
315static bool 318static bool
316socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par) 319socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
317{ 320{
318 struct ipv6hdr *iph = ipv6_hdr(skb); 321 struct ipv6hdr ipv6_var, *iph = ipv6_hdr(skb);
319 struct udphdr _hdr, *hp = NULL; 322 struct udphdr _hdr, *hp = NULL;
320 struct sock *sk = skb->sk; 323 struct sock *sk = skb->sk;
321 struct in6_addr *daddr = NULL, *saddr = NULL; 324 const struct in6_addr *daddr = NULL, *saddr = NULL;
322 __be16 uninitialized_var(dport), uninitialized_var(sport); 325 __be16 uninitialized_var(dport), uninitialized_var(sport);
323 int thoff = 0, uninitialized_var(tproto); 326 int thoff = 0, uninitialized_var(tproto);
324 const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo; 327 const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
@@ -342,7 +345,7 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
342 345
343 } else if (tproto == IPPROTO_ICMPV6) { 346 } else if (tproto == IPPROTO_ICMPV6) {
344 if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr, 347 if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
345 &sport, &dport)) 348 &sport, &dport, &ipv6_var))
346 return false; 349 return false;
347 } else { 350 } else {
348 return false; 351 return false;
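
The core of the xt_socket fix: the old extract_icmp6_fields() could hand back raddr/laddr pointers into _inside_iph, a local that dies when the function returns, so the addresses were read through dangling pointers. Having the caller own the ipv6hdr buffer keeps the storage alive as long as the addresses are used. The pitfall and the fix, in a runnable miniature:

    #include <stdio.h>

    struct hdr { int addr; };

    /* buggy: may return a pointer into its own stack frame */
    static const int *extract_buggy(const struct hdr *wire)
    {
        struct hdr local = *wire;     /* copy lives on this frame */
        return &local.addr;           /* dangling after return */
    }

    /* fixed: caller supplies the backing storage */
    static const int *extract_fixed(const struct hdr *wire, struct hdr *buf)
    {
        *buf = *wire;
        return &buf->addr;            /* valid as long as *buf lives */
    }

    int main(void)
    {
        struct hdr wire = { .addr = 42 }, buf;
        const int *p = extract_fixed(&wire, &buf);
        printf("%d\n", *p);
        (void)extract_buggy;          /* kept only to show the anti-pattern */
        return 0;
    }
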
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a96025c0583f..6b0f21950e09 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -3124,8 +3124,6 @@ static int __init netlink_proto_init(void)
3124 .key_len = sizeof(u32), /* portid */ 3124 .key_len = sizeof(u32), /* portid */
3125 .hashfn = jhash, 3125 .hashfn = jhash,
3126 .max_shift = 16, /* 64K */ 3126 .max_shift = 16, /* 64K */
3127 .grow_decision = rht_grow_above_75,
3128 .shrink_decision = rht_shrink_below_30,
3129 }; 3127 };
3130 3128
3131 if (err != 0) 3129 if (err != 0)
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index ae5e77cdc0ca..5bae7243c577 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -2194,14 +2194,55 @@ static int __net_init ovs_init_net(struct net *net)
2194 return 0; 2194 return 0;
2195} 2195}
2196 2196
2197static void __net_exit ovs_exit_net(struct net *net) 2197static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
2198 struct list_head *head)
2198{ 2199{
2199 struct datapath *dp, *dp_next;
2200 struct ovs_net *ovs_net = net_generic(net, ovs_net_id); 2200 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2201 struct datapath *dp;
2202
2203 list_for_each_entry(dp, &ovs_net->dps, list_node) {
2204 int i;
2205
2206 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2207 struct vport *vport;
2208
2209 hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
2210 struct netdev_vport *netdev_vport;
2211
2212 if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
2213 continue;
2214
2215 netdev_vport = netdev_vport_priv(vport);
2216 if (dev_net(netdev_vport->dev) == dnet)
2217 list_add(&vport->detach_list, head);
2218 }
2219 }
2220 }
2221}
2222
2223static void __net_exit ovs_exit_net(struct net *dnet)
2224{
2225 struct datapath *dp, *dp_next;
2226 struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
2227 struct vport *vport, *vport_next;
2228 struct net *net;
2229 LIST_HEAD(head);
2201 2230
2202 ovs_lock(); 2231 ovs_lock();
2203 list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node) 2232 list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2204 __dp_destroy(dp); 2233 __dp_destroy(dp);
2234
2235 rtnl_lock();
2236 for_each_net(net)
2237 list_vports_from_net(net, dnet, &head);
2238 rtnl_unlock();
2239
2240 /* Detach all vports from given namespace. */
2241 list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
2242 list_del(&vport->detach_list);
2243 ovs_dp_detach_port(vport);
2244 }
2245
2205 ovs_unlock(); 2246 ovs_unlock();
2206 2247
2207 cancel_work_sync(&ovs_net->dp_notify_work); 2248 cancel_work_sync(&ovs_net->dp_notify_work);
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 216f20b90aa5..22b18c145c92 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -2253,14 +2253,20 @@ static int masked_set_action_to_set_action_attr(const struct nlattr *a,
2253 struct sk_buff *skb) 2253 struct sk_buff *skb)
2254{ 2254{
2255 const struct nlattr *ovs_key = nla_data(a); 2255 const struct nlattr *ovs_key = nla_data(a);
2256 struct nlattr *nla;
2256 size_t key_len = nla_len(ovs_key) / 2; 2257 size_t key_len = nla_len(ovs_key) / 2;
2257 2258
2258 /* Revert the conversion we did from a non-masked set action to 2259 /* Revert the conversion we did from a non-masked set action to
2259 * masked set action. 2260 * masked set action.
2260 */ 2261 */
2261 if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a) - key_len, ovs_key)) 2262 nla = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
2263 if (!nla)
2262 return -EMSGSIZE; 2264 return -EMSGSIZE;
2263 2265
2266 if (nla_put(skb, nla_type(ovs_key), key_len, nla_data(ovs_key)))
2267 return -EMSGSIZE;
2268
2269 nla_nest_end(skb, nla);
2264 return 0; 2270 return 0;
2265} 2271}
2266 2272
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index f8ae295fb001..bc85331a6c60 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -103,6 +103,7 @@ struct vport_portids {
103 * @ops: Class structure. 103 * @ops: Class structure.
104 * @percpu_stats: Points to per-CPU statistics used and maintained by vport 104 * @percpu_stats: Points to per-CPU statistics used and maintained by vport
105 * @err_stats: Points to error statistics used and maintained by vport 105 * @err_stats: Points to error statistics used and maintained by vport
106 * @detach_list: list used for detaching vport in net-exit call.
106 */ 107 */
107struct vport { 108struct vport {
108 struct rcu_head rcu; 109 struct rcu_head rcu;
@@ -117,6 +118,7 @@ struct vport {
117 struct pcpu_sw_netstats __percpu *percpu_stats; 118 struct pcpu_sw_netstats __percpu *percpu_stats;
118 119
119 struct vport_err_stats err_stats; 120 struct vport_err_stats err_stats;
121 struct list_head detach_list;
120}; 122};
121 123
122/** 124/**
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 404c9735aee9..8167aecc1594 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -704,6 +704,10 @@ static void prb_retire_rx_blk_timer_expired(unsigned long data)
704 704
705 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) { 705 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
706 if (!frozen) { 706 if (!frozen) {
707 if (!BLOCK_NUM_PKTS(pbd)) {
708 /* An empty block. Just refresh the timer. */
709 goto refresh_timer;
710 }
707 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO); 711 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
708 if (!prb_dispatch_next_block(pkc, po)) 712 if (!prb_dispatch_next_block(pkc, po))
709 goto refresh_timer; 713 goto refresh_timer;
@@ -804,7 +808,11 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1,
804 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec; 808 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
805 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec; 809 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
806 } else { 810 } else {
807 /* Ok, we tmo'd - so get the current time */ 811 /* Ok, we tmo'd - so get the current time.
812 *
813 * It shouldn't really happen as we don't close empty
814 * blocks. See prb_retire_rx_blk_timer_expired().
815 */
808 struct timespec ts; 816 struct timespec ts;
809 getnstimeofday(&ts); 817 getnstimeofday(&ts);
810 h1->ts_last_pkt.ts_sec = ts.tv_sec; 818 h1->ts_last_pkt.ts_sec = ts.tv_sec;
@@ -1355,14 +1363,14 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1355 return 0; 1363 return 0;
1356 } 1364 }
1357 1365
1366 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1367 skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
1368 if (!skb)
1369 return 0;
1370 }
1358 switch (f->type) { 1371 switch (f->type) {
1359 case PACKET_FANOUT_HASH: 1372 case PACKET_FANOUT_HASH:
1360 default: 1373 default:
1361 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1362 skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
1363 if (!skb)
1364 return 0;
1365 }
1366 idx = fanout_demux_hash(f, skb, num); 1374 idx = fanout_demux_hash(f, skb, num);
1367 break; 1375 break;
1368 case PACKET_FANOUT_LB: 1376 case PACKET_FANOUT_LB:
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index c6be17a959a6..e0547f521f20 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -218,7 +218,8 @@ static void rxrpc_resend(struct rxrpc_call *call)
218 struct rxrpc_header *hdr; 218 struct rxrpc_header *hdr;
219 struct sk_buff *txb; 219 struct sk_buff *txb;
220 unsigned long *p_txb, resend_at; 220 unsigned long *p_txb, resend_at;
221 int loop, stop; 221 bool stop;
222 int loop;
222 u8 resend; 223 u8 resend;
223 224
224 _enter("{%d,%d,%d,%d},", 225 _enter("{%d,%d,%d,%d},",
@@ -226,7 +227,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
226 atomic_read(&call->sequence), 227 atomic_read(&call->sequence),
227 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz)); 228 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
228 229
229 stop = 0; 230 stop = false;
230 resend = 0; 231 resend = 0;
231 resend_at = 0; 232 resend_at = 0;
232 233
@@ -255,11 +256,11 @@ static void rxrpc_resend(struct rxrpc_call *call)
255 _proto("Tx DATA %%%u { #%d }", 256 _proto("Tx DATA %%%u { #%d }",
256 ntohl(sp->hdr.serial), ntohl(sp->hdr.seq)); 257 ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
257 if (rxrpc_send_packet(call->conn->trans, txb) < 0) { 258 if (rxrpc_send_packet(call->conn->trans, txb) < 0) {
258 stop = 0; 259 stop = true;
259 sp->resend_at = jiffies + 3; 260 sp->resend_at = jiffies + 3;
260 } else { 261 } else {
261 sp->resend_at = 262 sp->resend_at =
262 jiffies + rxrpc_resend_timeout * HZ; 263 jiffies + rxrpc_resend_timeout;
263 } 264 }
264 } 265 }
265 266
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 899d0319f2b2..2274e723a3df 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -348,7 +348,7 @@ config NET_SCH_PLUG
348comment "Classification" 348comment "Classification"
349 349
350config NET_CLS 350config NET_CLS
351 boolean 351 bool
352 352
353config NET_CLS_BASIC 353config NET_CLS_BASIC
354 tristate "Elementary classification (BASIC)" 354 tristate "Elementary classification (BASIC)"
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 6742200b1307..fbb7ebfc58c6 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -228,6 +228,7 @@ static int tcf_em_validate(struct tcf_proto *tp,
228 * to replay the request. 228 * to replay the request.
229 */ 229 */
230 module_put(em->ops->owner); 230 module_put(em->ops->owner);
231 em->ops = NULL;
231 err = -EAGAIN; 232 err = -EAGAIN;
232 } 233 }
233#endif 234#endif
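
Editor's note: the ematch fix clears em->ops after dropping the module reference, so when validation is replayed after -EAGAIN the teardown path cannot call module_put() a second time. The general release-then-poison pattern, sketched in userspace with a plain refcount (names are illustrative):

struct ops { int refcount; };

static void put_ops(struct ops *o) { o->refcount--; }

struct match { struct ops *ops; };

static void release_ops(struct match *m)
{
	if (!m->ops)
		return;		/* already released on a replayed pass */
	put_ops(m->ops);
	m->ops = NULL;	/* poison: a second teardown is now a no-op */
}
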
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
index abbb7dcd1689..59eeed43eda2 100644
--- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
+++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
@@ -217,6 +217,8 @@ static void gssp_free_receive_pages(struct gssx_arg_accept_sec_context *arg)
217 217
218 for (i = 0; i < arg->npages && arg->pages[i]; i++) 218 for (i = 0; i < arg->npages && arg->pages[i]; i++)
219 __free_page(arg->pages[i]); 219 __free_page(arg->pages[i]);
220
221 kfree(arg->pages);
220} 222}
221 223
222static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg) 224static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg)
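
Editor's note: the gss_rpc_upcall.c hunk plugs a leak — the loop released each page but never freed the pointer array that held them. The same shape in plain C:

#include <stdlib.h>

static void free_page_array(void **pages, size_t npages)
{
	size_t i;

	for (i = 0; i < npages && pages[i]; i++)
		free(pages[i]);	/* per-element release was already here */
	free(pages);		/* the analogue of the added kfree(arg->pages) */
}
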
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 224a82f24d3c..1095be9c80ab 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -463,6 +463,8 @@ static int rsc_parse(struct cache_detail *cd,
463 /* number of additional gid's */ 463 /* number of additional gid's */
464 if (get_int(&mesg, &N)) 464 if (get_int(&mesg, &N))
465 goto out; 465 goto out;
466 if (N < 0 || N > NGROUPS_MAX)
467 goto out;
466 status = -ENOMEM; 468 status = -ENOMEM;
467 rsci.cred.cr_group_info = groups_alloc(N); 469 rsci.cred.cr_group_info = groups_alloc(N);
468 if (rsci.cred.cr_group_info == NULL) 470 if (rsci.cred.cr_group_info == NULL)
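
Editor's note: the svcauth_gss.c change validates the group count parsed from the upcall message before it reaches groups_alloc(); previously a negative or huge N went straight to the allocator. Sketch of the guard (GROUPS_MAX here is an illustrative bound, not the kernel constant):

#include <stdlib.h>

#define GROUPS_MAX 65536

static int *alloc_groups(int n)
{
	/* Reject an untrusted count before sizing an allocation with it. */
	if (n < 0 || n > GROUPS_MAX)
		return NULL;
	return calloc((size_t)n, sizeof(int));
}
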
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 651f49ab601f..9dd0ea8db463 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -309,12 +309,15 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
309 struct rpc_xprt *xprt = req->rq_xprt; 309 struct rpc_xprt *xprt = req->rq_xprt;
310 struct svc_serv *bc_serv = xprt->bc_serv; 310 struct svc_serv *bc_serv = xprt->bc_serv;
311 311
312 spin_lock(&xprt->bc_pa_lock);
313 list_del(&req->rq_bc_pa_list);
314 spin_unlock(&xprt->bc_pa_lock);
315
312 req->rq_private_buf.len = copied; 316 req->rq_private_buf.len = copied;
313 set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); 317 set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
314 318
315 dprintk("RPC: add callback request to list\n"); 319 dprintk("RPC: add callback request to list\n");
316 spin_lock(&bc_serv->sv_cb_lock); 320 spin_lock(&bc_serv->sv_cb_lock);
317 list_del(&req->rq_bc_pa_list);
318 list_add(&req->rq_bc_list, &bc_serv->sv_cb_list); 321 list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
319 wake_up(&bc_serv->sv_cb_waitq); 322 wake_up(&bc_serv->sv_cb_waitq);
320 spin_unlock(&bc_serv->sv_cb_lock); 323 spin_unlock(&bc_serv->sv_cb_lock);
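
Editor's note: the backchannel_rqst.c fix unlinks the request from the preallocation list under bc_pa_lock — the lock that actually guards that list — instead of doing the list_del under the callback list's sv_cb_lock. The rule it restores: each list is only manipulated under its own lock. A pthread sketch of moving a node between two independently locked lists:

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; };

struct locked_list {
	pthread_mutex_t lock;
	struct node *head;
};

static void move_head(struct locked_list *from, struct locked_list *to)
{
	struct node *n;

	pthread_mutex_lock(&from->lock);	/* from's lock for from's list */
	n = from->head;
	if (n)
		from->head = n->next;
	pthread_mutex_unlock(&from->lock);

	if (!n)
		return;

	pthread_mutex_lock(&to->lock);		/* to's lock for to's list */
	n->next = to->head;
	to->head = n;
	pthread_mutex_unlock(&to->lock);
}
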
diff --git a/net/switchdev/Kconfig b/net/switchdev/Kconfig
index 155754588fd6..86a47e17cfaf 100644
--- a/net/switchdev/Kconfig
+++ b/net/switchdev/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5config NET_SWITCHDEV 5config NET_SWITCHDEV
6 boolean "Switch (and switch-ish) device support (EXPERIMENTAL)" 6 bool "Switch (and switch-ish) device support (EXPERIMENTAL)"
7 depends on INET 7 depends on INET
8 ---help--- 8 ---help---
9 This module provides glue between core networking code and device 9 This module provides glue between core networking code and device
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index dcb797c60806..95c514a1d7d9 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2363,8 +2363,6 @@ int tipc_sk_rht_init(struct net *net)
2363 .hashfn = jhash, 2363 .hashfn = jhash,
2364 .max_shift = 20, /* 1M */ 2364 .max_shift = 20, /* 1M */
2365 .min_shift = 8, /* 256 */ 2365 .min_shift = 8, /* 256 */
2366 .grow_decision = rht_grow_above_75,
2367 .shrink_decision = rht_shrink_below_30,
2368 }; 2366 };
2369 2367
2370 return rhashtable_init(&tn->sk_rht, &rht_params); 2368 return rhashtable_init(&tn->sk_rht, &rht_params);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 3af0ecf1cc16..2a0bbd22854b 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1199,6 +1199,7 @@ out_fail_wq:
1199 regulatory_exit(); 1199 regulatory_exit();
1200out_fail_reg: 1200out_fail_reg:
1201 debugfs_remove(ieee80211_debugfs_dir); 1201 debugfs_remove(ieee80211_debugfs_dir);
1202 nl80211_exit();
1202out_fail_nl80211: 1203out_fail_nl80211:
1203 unregister_netdevice_notifier(&cfg80211_netdev_notifier); 1204 unregister_netdevice_notifier(&cfg80211_netdev_notifier);
1204out_fail_notifier: 1205out_fail_notifier:
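
Editor's note: the core.c hunk adds the missing nl80211_exit() to the error unwind — everything initialized before the failure point must be torn down, in reverse order, by the goto ladder. The idiom, sketched with stub init/exit pairs:

static int init_a(void) { return 0; }
static int init_b(void) { return 0; }
static int init_c(void) { return -1; }
static void exit_a(void) {}
static void exit_b(void) {}

static int setup(void)
{
	if (init_a() < 0)
		goto out;
	if (init_b() < 0)
		goto out_a;
	if (init_c() < 0)
		goto out_b;	/* must undo b AND a, in reverse order */
	return 0;

out_b:
	exit_b();		/* the analogue of the added nl80211_exit() */
out_a:
	exit_a();
out:
	return -1;
}
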
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 96fe32889f5e..864b782c0202 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2654,10 +2654,6 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
2654 return err; 2654 return err;
2655 } 2655 }
2656 2656
2657 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2658 if (!msg)
2659 return -ENOMEM;
2660
2661 err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? 2657 err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
2662 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, 2658 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
2663 &flags); 2659 &flags);
@@ -2666,6 +2662,10 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
2666 !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) 2662 !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
2667 return -EOPNOTSUPP; 2663 return -EOPNOTSUPP;
2668 2664
2665 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2666 if (!msg)
2667 return -ENOMEM;
2668
2669 wdev = rdev_add_virtual_intf(rdev, 2669 wdev = rdev_add_virtual_intf(rdev,
2670 nla_data(info->attrs[NL80211_ATTR_IFNAME]), 2670 nla_data(info->attrs[NL80211_ATTR_IFNAME]),
2671 type, err ? NULL : &flags, &params); 2671 type, err ? NULL : &flags, &params);
@@ -12528,9 +12528,7 @@ static int cfg80211_net_detect_results(struct sk_buff *msg,
12528 } 12528 }
12529 12529
12530 for (j = 0; j < match->n_channels; j++) { 12530 for (j = 0; j < match->n_channels; j++) {
12531 if (nla_put_u32(msg, 12531 if (nla_put_u32(msg, j, match->channels[j])) {
12532 NL80211_ATTR_WIPHY_FREQ,
12533 match->channels[j])) {
12534 nla_nest_cancel(msg, nl_freqs); 12532 nla_nest_cancel(msg, nl_freqs);
12535 nla_nest_cancel(msg, nl_match); 12533 nla_nest_cancel(msg, nl_match);
12536 goto out; 12534 goto out;
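
Editor's note: earlier in this nl80211.c diff, nl80211_new_interface() moves the nlmsg_new() allocation to after parse_monitor_flags() and the ACTIVE_MONITOR capability check, so the early -EOPNOTSUPP return no longer leaks the message. Allocate last, after every validation that can bail out; a minimal sketch (validate() and the message type are stand-ins):

#include <stdlib.h>

struct msg { char buf[256]; };

static int validate(int type) { return type == 0 ? 0 : -1; }

static struct msg *build(int type)
{
	struct msg *m;

	if (validate(type) < 0)
		return NULL;	/* nothing allocated yet, nothing to leak */

	m = malloc(sizeof(*m));
	if (!m)
		return NULL;
	/* ... fill in m ... */
	return m;
}
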
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index b586d0dcb09e..48dfc7b4e981 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -228,7 +228,7 @@ static DECLARE_DELAYED_WORK(reg_timeout, reg_timeout_work);
228 228
229/* We keep a static world regulatory domain in case of the absence of CRDA */ 229/* We keep a static world regulatory domain in case of the absence of CRDA */
230static const struct ieee80211_regdomain world_regdom = { 230static const struct ieee80211_regdomain world_regdom = {
231 .n_reg_rules = 6, 231 .n_reg_rules = 8,
232 .alpha2 = "00", 232 .alpha2 = "00",
233 .reg_rules = { 233 .reg_rules = {
234 /* IEEE 802.11b/g, channels 1..11 */ 234 /* IEEE 802.11b/g, channels 1..11 */
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index edd2794569db..d3437b82ac25 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -129,17 +129,15 @@ cc-disable-warning = $(call try-run,\
129 $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1))) 129 $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
130 130
131# cc-version 131# cc-version
132# Usage gcc-ver := $(call cc-version)
133cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC)) 132cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
134 133
135# cc-fullversion 134# cc-fullversion
136# Usage gcc-ver := $(call cc-fullversion)
137cc-fullversion = $(shell $(CONFIG_SHELL) \ 135cc-fullversion = $(shell $(CONFIG_SHELL) \
138 $(srctree)/scripts/gcc-version.sh -p $(CC)) 136 $(srctree)/scripts/gcc-version.sh -p $(CC))
139 137
140# cc-ifversion 138# cc-ifversion
141# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) 139# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
142cc-ifversion = $(shell [ $(call cc-version, $(CC)) $(1) $(2) ] && echo $(3)) 140cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4))
143 141
144# cc-ldoption 142# cc-ldoption
145# Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) 143# Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
@@ -157,13 +155,12 @@ ld-option = $(call try-run,\
157ar-option = $(call try-run, $(AR) rc$(1) "$$TMP",$(1),$(2)) 155ar-option = $(call try-run, $(AR) rc$(1) "$$TMP",$(1),$(2))
158 156
159# ld-version 157# ld-version
160# Usage: $(call ld-version)
161# Note this is mainly for HJ Lu's 3 number binutil versions 158# Note this is mainly for HJ Lu's 3 number binutil versions
162ld-version = $(shell $(LD) --version | $(srctree)/scripts/ld-version.sh) 159ld-version = $(shell $(LD) --version | $(srctree)/scripts/ld-version.sh)
163 160
164# ld-ifversion 161# ld-ifversion
165# Usage: $(call ld-ifversion, -ge, 22252, y) 162# Usage: $(call ld-ifversion, -ge, 22252, y)
166ld-ifversion = $(shell [ $(call ld-version) $(1) $(2) ] && echo $(3)) 163ld-ifversion = $(shell [ $(ld-version) $(1) $(2) ] && echo $(3) || echo $(4))
167 164
168###### 165######
169 166
diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
index 627f8cbbedb8..55c96cb8070f 100644
--- a/scripts/Makefile.clean
+++ b/scripts/Makefile.clean
@@ -71,9 +71,6 @@ endif
71ifneq ($(strip $(__clean-dirs)),) 71ifneq ($(strip $(__clean-dirs)),)
72 +$(call cmd,cleandir) 72 +$(call cmd,cleandir)
73endif 73endif
74ifneq ($(strip $(clean-rule)),)
75 +$(clean-rule)
76endif
77 @: 74 @:
78 75
79 76
diff --git a/scripts/gdb/linux/__init__.py b/scripts/gdb/linux/__init__.py
new file mode 100644
index 000000000000..4680fb176337
--- /dev/null
+++ b/scripts/gdb/linux/__init__.py
@@ -0,0 +1 @@
# nothing to do for the initialization of this package
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index f88d90f20228..28df18dd1147 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -59,6 +59,7 @@ static void conf_message(const char *fmt, ...)
59 va_start(ap, fmt); 59 va_start(ap, fmt);
60 if (conf_message_callback) 60 if (conf_message_callback)
61 conf_message_callback(fmt, ap); 61 conf_message_callback(fmt, ap);
62 va_end(ap);
62} 63}
63 64
64const char *conf_get_configname(void) 65const char *conf_get_configname(void)
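
Editor's note: the confdata.c fix pairs the va_start() with a va_end(). On most ABIs the omission is harmless, but the C standard makes it undefined behavior, and the pairing must hold even when the consumer of the list is conditional. Sketched:

#include <stdarg.h>

static void (*message_cb)(const char *fmt, va_list ap);

static void conf_message(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (message_cb)
		message_cb(fmt, ap);
	va_end(ap);	/* required even on the callback-less path */
}
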
diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh
index 81b0c61bb9e2..2ab91b9b100d 100755
--- a/scripts/kconfig/merge_config.sh
+++ b/scripts/kconfig/merge_config.sh
@@ -77,6 +77,11 @@ while true; do
77 esac 77 esac
78done 78done
79 79
80if [ "$#" -lt 2 ] ; then
81 usage
82 exit
83fi
84
80INITFILE=$1 85INITFILE=$1
81shift; 86shift;
82 87
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 59726243c2eb..88dbf23b6970 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -217,9 +217,20 @@ else
217fi 217fi
218maintainer="$name <$email>" 218maintainer="$name <$email>"
219 219
220# Try to determine distribution
221if [ -n "$KDEB_CHANGELOG_DIST" ]; then
222 distribution=$KDEB_CHANGELOG_DIST
223elif distribution=$(lsb_release -cs 2>/dev/null) && [ -n "$distribution" ]; then
224 : # nothing to do in this case
225else
226 distribution="unstable"
227 echo >&2 "Using default distribution of 'unstable' in the changelog"
228 echo >&2 "Install lsb-release or set \$KDEB_CHANGELOG_DIST explicitly"
229fi
230
220# Generate a simple changelog template 231# Generate a simple changelog template
221cat <<EOF > debian/changelog 232cat <<EOF > debian/changelog
222linux-upstream ($packageversion) unstable; urgency=low 233linux-upstream ($packageversion) $distribution; urgency=low
223 234
224 * Custom built Linux kernel. 235 * Custom built Linux kernel.
225 236
@@ -233,10 +244,10 @@ This is a packaged upstream version of the Linux kernel.
233The sources may be found at most Linux ftp sites, including: 244The sources may be found at most Linux ftp sites, including:
234ftp://ftp.kernel.org/pub/linux/kernel 245ftp://ftp.kernel.org/pub/linux/kernel
235 246
236Copyright: 1991 - 2009 Linus Torvalds and others. 247Copyright: 1991 - 2015 Linus Torvalds and others.
237 248
238The git repository for mainline kernel development is at: 249The git repository for mainline kernel development is at:
239git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git 250git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
240 251
241 This program is free software; you can redistribute it and/or modify 252 This program is free software; you can redistribute it and/or modify
242 it under the terms of the GNU General Public License as published by 253 it under the terms of the GNU General Public License as published by
diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h
index 97130f88838b..e4ea62663866 100644
--- a/security/apparmor/include/apparmor.h
+++ b/security/apparmor/include/apparmor.h
@@ -112,9 +112,9 @@ static inline unsigned int aa_dfa_null_transition(struct aa_dfa *dfa,
112 return aa_dfa_next(dfa, start, 0); 112 return aa_dfa_next(dfa, start, 0);
113} 113}
114 114
115static inline bool mediated_filesystem(struct inode *inode) 115static inline bool mediated_filesystem(struct dentry *dentry)
116{ 116{
117 return !(inode->i_sb->s_flags & MS_NOUSER); 117 return !(dentry->d_sb->s_flags & MS_NOUSER);
118} 118}
119 119
120#endif /* __APPARMOR_H */ 120#endif /* __APPARMOR_H */
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 65ca451a764d..107db88b1d5f 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -226,7 +226,7 @@ static int common_perm_rm(int op, struct path *dir,
226 struct inode *inode = dentry->d_inode; 226 struct inode *inode = dentry->d_inode;
227 struct path_cond cond = { }; 227 struct path_cond cond = { };
228 228
229 if (!inode || !dir->mnt || !mediated_filesystem(inode)) 229 if (!inode || !dir->mnt || !mediated_filesystem(dentry))
230 return 0; 230 return 0;
231 231
232 cond.uid = inode->i_uid; 232 cond.uid = inode->i_uid;
@@ -250,7 +250,7 @@ static int common_perm_create(int op, struct path *dir, struct dentry *dentry,
250{ 250{
251 struct path_cond cond = { current_fsuid(), mode }; 251 struct path_cond cond = { current_fsuid(), mode };
252 252
253 if (!dir->mnt || !mediated_filesystem(dir->dentry->d_inode)) 253 if (!dir->mnt || !mediated_filesystem(dir->dentry))
254 return 0; 254 return 0;
255 255
256 return common_perm_dir_dentry(op, dir, dentry, mask, &cond); 256 return common_perm_dir_dentry(op, dir, dentry, mask, &cond);
@@ -285,7 +285,7 @@ static int apparmor_path_truncate(struct path *path)
285 path->dentry->d_inode->i_mode 285 path->dentry->d_inode->i_mode
286 }; 286 };
287 287
288 if (!path->mnt || !mediated_filesystem(path->dentry->d_inode)) 288 if (!path->mnt || !mediated_filesystem(path->dentry))
289 return 0; 289 return 0;
290 290
291 return common_perm(OP_TRUNC, path, MAY_WRITE | AA_MAY_META_WRITE, 291 return common_perm(OP_TRUNC, path, MAY_WRITE | AA_MAY_META_WRITE,
@@ -305,7 +305,7 @@ static int apparmor_path_link(struct dentry *old_dentry, struct path *new_dir,
305 struct aa_profile *profile; 305 struct aa_profile *profile;
306 int error = 0; 306 int error = 0;
307 307
308 if (!mediated_filesystem(old_dentry->d_inode)) 308 if (!mediated_filesystem(old_dentry))
309 return 0; 309 return 0;
310 310
311 profile = aa_current_profile(); 311 profile = aa_current_profile();
@@ -320,7 +320,7 @@ static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry,
320 struct aa_profile *profile; 320 struct aa_profile *profile;
321 int error = 0; 321 int error = 0;
322 322
323 if (!mediated_filesystem(old_dentry->d_inode)) 323 if (!mediated_filesystem(old_dentry))
324 return 0; 324 return 0;
325 325
326 profile = aa_current_profile(); 326 profile = aa_current_profile();
@@ -346,7 +346,7 @@ static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry,
346 346
347static int apparmor_path_chmod(struct path *path, umode_t mode) 347static int apparmor_path_chmod(struct path *path, umode_t mode)
348{ 348{
349 if (!mediated_filesystem(path->dentry->d_inode)) 349 if (!mediated_filesystem(path->dentry))
350 return 0; 350 return 0;
351 351
352 return common_perm_mnt_dentry(OP_CHMOD, path->mnt, path->dentry, AA_MAY_CHMOD); 352 return common_perm_mnt_dentry(OP_CHMOD, path->mnt, path->dentry, AA_MAY_CHMOD);
@@ -358,7 +358,7 @@ static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid)
358 path->dentry->d_inode->i_mode 358 path->dentry->d_inode->i_mode
359 }; 359 };
360 360
361 if (!mediated_filesystem(path->dentry->d_inode)) 361 if (!mediated_filesystem(path->dentry))
362 return 0; 362 return 0;
363 363
364 return common_perm(OP_CHOWN, path, AA_MAY_CHOWN, &cond); 364 return common_perm(OP_CHOWN, path, AA_MAY_CHOWN, &cond);
@@ -366,7 +366,7 @@ static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid)
366 366
367static int apparmor_inode_getattr(struct vfsmount *mnt, struct dentry *dentry) 367static int apparmor_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
368{ 368{
369 if (!mediated_filesystem(dentry->d_inode)) 369 if (!mediated_filesystem(dentry))
370 return 0; 370 return 0;
371 371
372 return common_perm_mnt_dentry(OP_GETATTR, mnt, dentry, 372 return common_perm_mnt_dentry(OP_GETATTR, mnt, dentry,
@@ -379,7 +379,7 @@ static int apparmor_file_open(struct file *file, const struct cred *cred)
379 struct aa_profile *profile; 379 struct aa_profile *profile;
380 int error = 0; 380 int error = 0;
381 381
382 if (!mediated_filesystem(file_inode(file))) 382 if (!mediated_filesystem(file->f_path.dentry))
383 return 0; 383 return 0;
384 384
385 /* If in exec, permission is handled by bprm hooks. 385 /* If in exec, permission is handled by bprm hooks.
@@ -432,7 +432,7 @@ static int common_file_perm(int op, struct file *file, u32 mask)
432 BUG_ON(!fprofile); 432 BUG_ON(!fprofile);
433 433
434 if (!file->f_path.mnt || 434 if (!file->f_path.mnt ||
435 !mediated_filesystem(file_inode(file))) 435 !mediated_filesystem(file->f_path.dentry))
436 return 0; 436 return 0;
437 437
438 profile = __aa_current_profile(); 438 profile = __aa_current_profile();
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
index 35b394a75d76..71e0e3a15b9d 100644
--- a/security/apparmor/path.c
+++ b/security/apparmor/path.c
@@ -114,7 +114,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
114 * security_path hooks as a deleted dentry except without an inode 114 * security_path hooks as a deleted dentry except without an inode
115 * allocated. 115 * allocated.
116 */ 116 */
117 if (d_unlinked(path->dentry) && path->dentry->d_inode && 117 if (d_unlinked(path->dentry) && d_is_positive(path->dentry) &&
118 !(flags & PATH_MEDIATE_DELETED)) { 118 !(flags & PATH_MEDIATE_DELETED)) {
119 error = -ENOENT; 119 error = -ENOENT;
120 goto out; 120 goto out;
diff --git a/security/inode.c b/security/inode.c
index 8e7ca62078ab..131a3c49f766 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -203,7 +203,7 @@ void securityfs_remove(struct dentry *dentry)
203 mutex_lock(&parent->d_inode->i_mutex); 203 mutex_lock(&parent->d_inode->i_mutex);
204 if (positive(dentry)) { 204 if (positive(dentry)) {
205 if (dentry->d_inode) { 205 if (dentry->d_inode) {
206 if (S_ISDIR(dentry->d_inode->i_mode)) 206 if (d_is_dir(dentry))
207 simple_rmdir(parent->d_inode, dentry); 207 simple_rmdir(parent->d_inode, dentry);
208 else 208 else
209 simple_unlink(parent->d_inode, dentry); 209 simple_unlink(parent->d_inode, dentry);
diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig
index b76235ae4786..73c457bf5a4a 100644
--- a/security/integrity/Kconfig
+++ b/security/integrity/Kconfig
@@ -16,7 +16,7 @@ config INTEGRITY
16if INTEGRITY 16if INTEGRITY
17 17
18config INTEGRITY_SIGNATURE 18config INTEGRITY_SIGNATURE
19 boolean "Digital signature verification using multiple keyrings" 19 bool "Digital signature verification using multiple keyrings"
20 depends on KEYS 20 depends on KEYS
21 default n 21 default n
22 select SIGNATURE 22 select SIGNATURE
@@ -30,7 +30,7 @@ config INTEGRITY_SIGNATURE
30 usually only added from initramfs. 30 usually only added from initramfs.
31 31
32config INTEGRITY_ASYMMETRIC_KEYS 32config INTEGRITY_ASYMMETRIC_KEYS
33 boolean "Enable asymmetric keys support" 33 bool "Enable asymmetric keys support"
34 depends on INTEGRITY_SIGNATURE 34 depends on INTEGRITY_SIGNATURE
35 default n 35 default n
36 select ASYMMETRIC_KEY_TYPE 36 select ASYMMETRIC_KEY_TYPE
diff --git a/security/integrity/evm/Kconfig b/security/integrity/evm/Kconfig
index df586fa00ef1..bf19723cf117 100644
--- a/security/integrity/evm/Kconfig
+++ b/security/integrity/evm/Kconfig
@@ -1,5 +1,5 @@
1config EVM 1config EVM
2 boolean "EVM support" 2 bool "EVM support"
3 select KEYS 3 select KEYS
4 select ENCRYPTED_KEYS 4 select ENCRYPTED_KEYS
5 select CRYPTO_HMAC 5 select CRYPTO_HMAC
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 29c39e0b03ed..4d1a54190388 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1799,7 +1799,7 @@ static inline int may_rename(struct inode *old_dir,
1799 1799
1800 old_dsec = old_dir->i_security; 1800 old_dsec = old_dir->i_security;
1801 old_isec = old_dentry->d_inode->i_security; 1801 old_isec = old_dentry->d_inode->i_security;
1802 old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode); 1802 old_is_dir = d_is_dir(old_dentry);
1803 new_dsec = new_dir->i_security; 1803 new_dsec = new_dir->i_security;
1804 1804
1805 ad.type = LSM_AUDIT_DATA_DENTRY; 1805 ad.type = LSM_AUDIT_DATA_DENTRY;
@@ -1822,14 +1822,14 @@ static inline int may_rename(struct inode *old_dir,
1822 1822
1823 ad.u.dentry = new_dentry; 1823 ad.u.dentry = new_dentry;
1824 av = DIR__ADD_NAME | DIR__SEARCH; 1824 av = DIR__ADD_NAME | DIR__SEARCH;
1825 if (new_dentry->d_inode) 1825 if (d_is_positive(new_dentry))
1826 av |= DIR__REMOVE_NAME; 1826 av |= DIR__REMOVE_NAME;
1827 rc = avc_has_perm(sid, new_dsec->sid, SECCLASS_DIR, av, &ad); 1827 rc = avc_has_perm(sid, new_dsec->sid, SECCLASS_DIR, av, &ad);
1828 if (rc) 1828 if (rc)
1829 return rc; 1829 return rc;
1830 if (new_dentry->d_inode) { 1830 if (d_is_positive(new_dentry)) {
1831 new_isec = new_dentry->d_inode->i_security; 1831 new_isec = new_dentry->d_inode->i_security;
1832 new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode); 1832 new_is_dir = d_is_dir(new_dentry);
1833 rc = avc_has_perm(sid, new_isec->sid, 1833 rc = avc_has_perm(sid, new_isec->sid,
1834 new_isec->sclass, 1834 new_isec->sclass,
1835 (new_is_dir ? DIR__RMDIR : FILE__UNLINK), &ad); 1835 (new_is_dir ? DIR__RMDIR : FILE__UNLINK), &ad);
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index ed94f6f836e7..c934311812f1 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -855,7 +855,7 @@ static int smack_inode_link(struct dentry *old_dentry, struct inode *dir,
855 rc = smk_curacc(isp, MAY_WRITE, &ad); 855 rc = smk_curacc(isp, MAY_WRITE, &ad);
856 rc = smk_bu_inode(old_dentry->d_inode, MAY_WRITE, rc); 856 rc = smk_bu_inode(old_dentry->d_inode, MAY_WRITE, rc);
857 857
858 if (rc == 0 && new_dentry->d_inode != NULL) { 858 if (rc == 0 && d_is_positive(new_dentry)) {
859 isp = smk_of_inode(new_dentry->d_inode); 859 isp = smk_of_inode(new_dentry->d_inode);
860 smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry); 860 smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry);
861 rc = smk_curacc(isp, MAY_WRITE, &ad); 861 rc = smk_curacc(isp, MAY_WRITE, &ad);
@@ -961,7 +961,7 @@ static int smack_inode_rename(struct inode *old_inode,
961 rc = smk_curacc(isp, MAY_READWRITE, &ad); 961 rc = smk_curacc(isp, MAY_READWRITE, &ad);
962 rc = smk_bu_inode(old_dentry->d_inode, MAY_READWRITE, rc); 962 rc = smk_bu_inode(old_dentry->d_inode, MAY_READWRITE, rc);
963 963
964 if (rc == 0 && new_dentry->d_inode != NULL) { 964 if (rc == 0 && d_is_positive(new_dentry)) {
965 isp = smk_of_inode(new_dentry->d_inode); 965 isp = smk_of_inode(new_dentry->d_inode);
966 smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry); 966 smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry);
967 rc = smk_curacc(isp, MAY_READWRITE, &ad); 967 rc = smk_curacc(isp, MAY_READWRITE, &ad);
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
index 400390790745..c151a1869597 100644
--- a/security/tomoyo/file.c
+++ b/security/tomoyo/file.c
@@ -905,11 +905,9 @@ int tomoyo_path2_perm(const u8 operation, struct path *path1,
905 !tomoyo_get_realpath(&buf2, path2)) 905 !tomoyo_get_realpath(&buf2, path2))
906 goto out; 906 goto out;
907 switch (operation) { 907 switch (operation) {
908 struct dentry *dentry;
909 case TOMOYO_TYPE_RENAME: 908 case TOMOYO_TYPE_RENAME:
910 case TOMOYO_TYPE_LINK: 909 case TOMOYO_TYPE_LINK:
911 dentry = path1->dentry; 910 if (!d_is_dir(path1->dentry))
912 if (!dentry->d_inode || !S_ISDIR(dentry->d_inode->i_mode))
913 break; 911 break;
914 /* fall through */ 912 /* fall through */
915 case TOMOYO_TYPE_PIVOT_ROOT: 913 case TOMOYO_TYPE_PIVOT_ROOT:
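
Editor's note: the security-layer hunks above (apparmor, securityfs, selinux, smack, tomoyo) all migrate from poking at dentry->d_inode directly to the d_is_positive()/d_is_dir() accessors, which stay correct for negative dentries and layered filesystems. The accessor idea in miniature, with illustrative names and fields:

#include <stdbool.h>
#include <stddef.h>

struct inode { unsigned mode; };
struct dentry { struct inode *inode; unsigned type_flags; };

#define TYPE_DIR 0x1u

/* Accessors centralize the NULL and type logic instead of every
 * caller open-coding dentry->inode checks. */
static bool dentry_is_positive(const struct dentry *d)
{
	return d->inode != NULL;
}

static bool dentry_is_dir(const struct dentry *d)
{
	return (d->type_flags & TYPE_DIR) != 0;
}
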
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index b03a638b420c..279e24f61305 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1552,6 +1552,8 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
1552 if (! snd_pcm_playback_empty(substream)) { 1552 if (! snd_pcm_playback_empty(substream)) {
1553 snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING); 1553 snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
1554 snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING); 1554 snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
1555 } else {
1556 runtime->status->state = SNDRV_PCM_STATE_SETUP;
1555 } 1557 }
1556 break; 1558 break;
1557 case SNDRV_PCM_STATE_RUNNING: 1559 case SNDRV_PCM_STATE_RUNNING:
diff --git a/sound/core/seq/seq_midi_emul.c b/sound/core/seq/seq_midi_emul.c
index 9b6470cdcf24..7ba937399ac7 100644
--- a/sound/core/seq/seq_midi_emul.c
+++ b/sound/core/seq/seq_midi_emul.c
@@ -269,6 +269,9 @@ do_control(struct snd_midi_op *ops, void *drv, struct snd_midi_channel_set *chse
269{ 269{
270 int i; 270 int i;
271 271
272 if (control >= ARRAY_SIZE(chan->control))
273 return;
274
272 /* Switches */ 275 /* Switches */
273 if ((control >=64 && control <=69) || (control >= 80 && control <= 83)) { 276 if ((control >=64 && control <=69) || (control >= 80 && control <= 83)) {
274 /* These are all switches; either off or on so set to 0 or 127 */ 277 /* These are all switches; either off or on so set to 0 or 127 */
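
Editor's note: do_control() now rejects controller numbers outside chan->control[] before using them as an index. MIDI data is externally supplied, so the index must be validated first; sketched:

#define NUM_CONTROLS 128

struct channel { int control[NUM_CONTROLS]; };

static void do_control(struct channel *chan, unsigned control, int value)
{
	/* control comes off the wire -- bound it before the array write */
	if (control >= NUM_CONTROLS)
		return;
	chan->control[control] = value;
}
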
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
index 0d580186ef1a..5cc356db5351 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -33,7 +33,7 @@
33 */ 33 */
34#define MAX_MIDI_RX_BLOCKS 8 34#define MAX_MIDI_RX_BLOCKS 8
35 35
36#define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 µs */ 36#define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 microseconds */
37 37
38/* isochronous header parameters */ 38/* isochronous header parameters */
39#define ISO_DATA_LENGTH_SHIFT 16 39#define ISO_DATA_LENGTH_SHIFT 16
@@ -78,7 +78,7 @@ static void pcm_period_tasklet(unsigned long data);
78int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit, 78int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
79 enum amdtp_stream_direction dir, enum cip_flags flags) 79 enum amdtp_stream_direction dir, enum cip_flags flags)
80{ 80{
81 s->unit = fw_unit_get(unit); 81 s->unit = unit;
82 s->direction = dir; 82 s->direction = dir;
83 s->flags = flags; 83 s->flags = flags;
84 s->context = ERR_PTR(-1); 84 s->context = ERR_PTR(-1);
@@ -102,7 +102,6 @@ void amdtp_stream_destroy(struct amdtp_stream *s)
102{ 102{
103 WARN_ON(amdtp_stream_running(s)); 103 WARN_ON(amdtp_stream_running(s));
104 mutex_destroy(&s->mutex); 104 mutex_destroy(&s->mutex);
105 fw_unit_put(s->unit);
106} 105}
107EXPORT_SYMBOL(amdtp_stream_destroy); 106EXPORT_SYMBOL(amdtp_stream_destroy);
108 107
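
Editor's note: across the FireWire hunks that follow (bebob, dice, fireworks, oxfw), the fw_unit reference moves — amdtp_stream_init() no longer takes it, the probe path does via fw_unit_get(), and the card destructor drops it via fw_unit_put(), so the unit outlives every ALSA character device. Ownership concentrated in one place, as a userspace sketch with a plain refcount:

#include <stdlib.h>

struct unit { int refs; };

static struct unit *unit_get(struct unit *u) { u->refs++; return u; }
static void unit_put(struct unit *u) { if (--u->refs == 0) free(u); }

struct card { struct unit *unit; };

/* probe: the card takes the one long-lived reference ... */
static void probe(struct card *c, struct unit *u)
{
	c->unit = unit_get(u);
}

/* ... and only the card destructor, which runs after the last
 * character device is closed, drops it. Helpers never get/put. */
static void card_free(struct card *c)
{
	unit_put(c->unit);
}
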
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index fc19c99654aa..611b7dae7ee5 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -116,11 +116,22 @@ end:
116 return err; 116 return err;
117} 117}
118 118
119/*
120 * This module releases the FireWire unit data after all ALSA character devices
121 * are released by applications. This is for releasing stream data or finishing
122 * transactions safely. Thus, when returning from .remove(), this module still
123 * keeps its references to the unit.
124 */
119static void 125static void
120bebob_card_free(struct snd_card *card) 126bebob_card_free(struct snd_card *card)
121{ 127{
122 struct snd_bebob *bebob = card->private_data; 128 struct snd_bebob *bebob = card->private_data;
123 129
130 snd_bebob_stream_destroy_duplex(bebob);
131 fw_unit_put(bebob->unit);
132
133 kfree(bebob->maudio_special_quirk);
134
124 if (bebob->card_index >= 0) { 135 if (bebob->card_index >= 0) {
125 mutex_lock(&devices_mutex); 136 mutex_lock(&devices_mutex);
126 clear_bit(bebob->card_index, devices_used); 137 clear_bit(bebob->card_index, devices_used);
@@ -205,7 +216,7 @@ bebob_probe(struct fw_unit *unit,
205 card->private_free = bebob_card_free; 216 card->private_free = bebob_card_free;
206 217
207 bebob->card = card; 218 bebob->card = card;
208 bebob->unit = unit; 219 bebob->unit = fw_unit_get(unit);
209 bebob->spec = spec; 220 bebob->spec = spec;
210 mutex_init(&bebob->mutex); 221 mutex_init(&bebob->mutex);
211 spin_lock_init(&bebob->lock); 222 spin_lock_init(&bebob->lock);
@@ -306,10 +317,11 @@ static void bebob_remove(struct fw_unit *unit)
306 if (bebob == NULL) 317 if (bebob == NULL)
307 return; 318 return;
308 319
309 kfree(bebob->maudio_special_quirk); 320 /* Awake bus-reset waiters. */
321 if (!completion_done(&bebob->bus_reset))
322 complete_all(&bebob->bus_reset);
310 323
311 snd_bebob_stream_destroy_duplex(bebob); 324 /* No need to wait for releasing card object in this context. */
312 snd_card_disconnect(bebob->card);
313 snd_card_free_when_closed(bebob->card); 325 snd_card_free_when_closed(bebob->card);
314} 326}
315 327
diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
index 0ebcabfdc7ce..98e4fc8121a1 100644
--- a/sound/firewire/bebob/bebob_stream.c
+++ b/sound/firewire/bebob/bebob_stream.c
@@ -410,8 +410,6 @@ break_both_connections(struct snd_bebob *bebob)
410static void 410static void
411destroy_both_connections(struct snd_bebob *bebob) 411destroy_both_connections(struct snd_bebob *bebob)
412{ 412{
413 break_both_connections(bebob);
414
415 cmp_connection_destroy(&bebob->in_conn); 413 cmp_connection_destroy(&bebob->in_conn);
416 cmp_connection_destroy(&bebob->out_conn); 414 cmp_connection_destroy(&bebob->out_conn);
417} 415}
@@ -712,22 +710,16 @@ void snd_bebob_stream_update_duplex(struct snd_bebob *bebob)
712 mutex_unlock(&bebob->mutex); 710 mutex_unlock(&bebob->mutex);
713} 711}
714 712
713/*
714 * This function should be called before starting streams or after stopping
715 * streams.
716 */
715void snd_bebob_stream_destroy_duplex(struct snd_bebob *bebob) 717void snd_bebob_stream_destroy_duplex(struct snd_bebob *bebob)
716{ 718{
717 mutex_lock(&bebob->mutex);
718
719 amdtp_stream_pcm_abort(&bebob->rx_stream);
720 amdtp_stream_pcm_abort(&bebob->tx_stream);
721
722 amdtp_stream_stop(&bebob->rx_stream);
723 amdtp_stream_stop(&bebob->tx_stream);
724
725 amdtp_stream_destroy(&bebob->rx_stream); 719 amdtp_stream_destroy(&bebob->rx_stream);
726 amdtp_stream_destroy(&bebob->tx_stream); 720 amdtp_stream_destroy(&bebob->tx_stream);
727 721
728 destroy_both_connections(bebob); 722 destroy_both_connections(bebob);
729
730 mutex_unlock(&bebob->mutex);
731} 723}
732 724
733/* 725/*
diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
index fa9cf761b610..07dbd01d7a6b 100644
--- a/sound/firewire/dice/dice-stream.c
+++ b/sound/firewire/dice/dice-stream.c
@@ -311,14 +311,21 @@ end:
311 return err; 311 return err;
312} 312}
313 313
314/*
315 * This function should be called before starting streams or after stopping
316 * streams.
317 */
314static void destroy_stream(struct snd_dice *dice, struct amdtp_stream *stream) 318static void destroy_stream(struct snd_dice *dice, struct amdtp_stream *stream)
315{ 319{
316 amdtp_stream_destroy(stream); 320 struct fw_iso_resources *resources;
317 321
318 if (stream == &dice->tx_stream) 322 if (stream == &dice->tx_stream)
319 fw_iso_resources_destroy(&dice->tx_resources); 323 resources = &dice->tx_resources;
320 else 324 else
321 fw_iso_resources_destroy(&dice->rx_resources); 325 resources = &dice->rx_resources;
326
327 amdtp_stream_destroy(stream);
328 fw_iso_resources_destroy(resources);
322} 329}
323 330
324int snd_dice_stream_init_duplex(struct snd_dice *dice) 331int snd_dice_stream_init_duplex(struct snd_dice *dice)
@@ -332,6 +339,8 @@ int snd_dice_stream_init_duplex(struct snd_dice *dice)
332 goto end; 339 goto end;
333 340
334 err = init_stream(dice, &dice->rx_stream); 341 err = init_stream(dice, &dice->rx_stream);
342 if (err < 0)
343 destroy_stream(dice, &dice->tx_stream);
335end: 344end:
336 return err; 345 return err;
337} 346}
@@ -340,10 +349,7 @@ void snd_dice_stream_destroy_duplex(struct snd_dice *dice)
340{ 349{
341 snd_dice_transaction_clear_enable(dice); 350 snd_dice_transaction_clear_enable(dice);
342 351
343 stop_stream(dice, &dice->tx_stream);
344 destroy_stream(dice, &dice->tx_stream); 352 destroy_stream(dice, &dice->tx_stream);
345
346 stop_stream(dice, &dice->rx_stream);
347 destroy_stream(dice, &dice->rx_stream); 353 destroy_stream(dice, &dice->rx_stream);
348 354
349 dice->substreams_counter = 0; 355 dice->substreams_counter = 0;
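
Editor's note: snd_dice_stream_init_duplex() now destroys the already-initialized tx stream when initializing the rx stream fails, so a half-constructed duplex pair is never left behind. The pairwise-init pattern, sketched with stubs:

static int init_tx(void) { return 0; }
static int init_rx(void) { return -1; }
static void destroy_tx(void) {}

static int init_duplex(void)
{
	int err;

	err = init_tx();
	if (err < 0)
		return err;

	err = init_rx();
	if (err < 0)
		destroy_tx();	/* unwind the half that already succeeded */
	return err;
}
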
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
index 90d8f40ff727..70a111d7f428 100644
--- a/sound/firewire/dice/dice.c
+++ b/sound/firewire/dice/dice.c
@@ -226,11 +226,20 @@ static void dice_card_strings(struct snd_dice *dice)
226 strcpy(card->mixername, "DICE"); 226 strcpy(card->mixername, "DICE");
227} 227}
228 228
229/*
230 * This module releases the FireWire unit data after all ALSA character devices
231 * are released by applications. This is for releasing stream data or finishing
232 * transactions safely. Thus, when returning from .remove(), this module still
233 * keeps its references to the unit.
234 */
229static void dice_card_free(struct snd_card *card) 235static void dice_card_free(struct snd_card *card)
230{ 236{
231 struct snd_dice *dice = card->private_data; 237 struct snd_dice *dice = card->private_data;
232 238
239 snd_dice_stream_destroy_duplex(dice);
233 snd_dice_transaction_destroy(dice); 240 snd_dice_transaction_destroy(dice);
241 fw_unit_put(dice->unit);
242
234 mutex_destroy(&dice->mutex); 243 mutex_destroy(&dice->mutex);
235} 244}
236 245
@@ -251,7 +260,7 @@ static int dice_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
251 260
252 dice = card->private_data; 261 dice = card->private_data;
253 dice->card = card; 262 dice->card = card;
254 dice->unit = unit; 263 dice->unit = fw_unit_get(unit);
255 card->private_free = dice_card_free; 264 card->private_free = dice_card_free;
256 265
257 spin_lock_init(&dice->lock); 266 spin_lock_init(&dice->lock);
@@ -305,10 +314,7 @@ static void dice_remove(struct fw_unit *unit)
305{ 314{
306 struct snd_dice *dice = dev_get_drvdata(&unit->device); 315 struct snd_dice *dice = dev_get_drvdata(&unit->device);
307 316
308 snd_card_disconnect(dice->card); 317 /* No need to wait for releasing card object in this context. */
309
310 snd_dice_stream_destroy_duplex(dice);
311
312 snd_card_free_when_closed(dice->card); 318 snd_card_free_when_closed(dice->card);
313} 319}
314 320
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index 3e2ed8e82cbc..2682e7e3e5c9 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -173,11 +173,23 @@ end:
173 return err; 173 return err;
174} 174}
175 175
176/*
177 * This module releases the FireWire unit data after all ALSA character devices
178 * are released by applications. This is for releasing stream data or finishing
179 * transactions safely. Thus, when returning from .remove(), this module still
180 * keeps its references to the unit.
181 */
176static void 182static void
177efw_card_free(struct snd_card *card) 183efw_card_free(struct snd_card *card)
178{ 184{
179 struct snd_efw *efw = card->private_data; 185 struct snd_efw *efw = card->private_data;
180 186
187 snd_efw_stream_destroy_duplex(efw);
188 snd_efw_transaction_remove_instance(efw);
189 fw_unit_put(efw->unit);
190
191 kfree(efw->resp_buf);
192
181 if (efw->card_index >= 0) { 193 if (efw->card_index >= 0) {
182 mutex_lock(&devices_mutex); 194 mutex_lock(&devices_mutex);
183 clear_bit(efw->card_index, devices_used); 195 clear_bit(efw->card_index, devices_used);
@@ -185,7 +197,6 @@ efw_card_free(struct snd_card *card)
185 } 197 }
186 198
187 mutex_destroy(&efw->mutex); 199 mutex_destroy(&efw->mutex);
188 kfree(efw->resp_buf);
189} 200}
190 201
191static int 202static int
@@ -218,7 +229,7 @@ efw_probe(struct fw_unit *unit,
218 card->private_free = efw_card_free; 229 card->private_free = efw_card_free;
219 230
220 efw->card = card; 231 efw->card = card;
221 efw->unit = unit; 232 efw->unit = fw_unit_get(unit);
222 mutex_init(&efw->mutex); 233 mutex_init(&efw->mutex);
223 spin_lock_init(&efw->lock); 234 spin_lock_init(&efw->lock);
224 init_waitqueue_head(&efw->hwdep_wait); 235 init_waitqueue_head(&efw->hwdep_wait);
@@ -289,10 +300,7 @@ static void efw_remove(struct fw_unit *unit)
289{ 300{
290 struct snd_efw *efw = dev_get_drvdata(&unit->device); 301 struct snd_efw *efw = dev_get_drvdata(&unit->device);
291 302
292 snd_efw_stream_destroy_duplex(efw); 303 /* No need to wait for releasing card object in this context. */
293 snd_efw_transaction_remove_instance(efw);
294
295 snd_card_disconnect(efw->card);
296 snd_card_free_when_closed(efw->card); 304 snd_card_free_when_closed(efw->card);
297} 305}
298 306
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index 4f440e163667..c55db1bddc80 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -100,17 +100,22 @@ end:
100 return err; 100 return err;
101} 101}
102 102
103/*
104 * This function should be called before starting the stream or after stopping
105 * the stream.
106 */
103static void 107static void
104destroy_stream(struct snd_efw *efw, struct amdtp_stream *stream) 108destroy_stream(struct snd_efw *efw, struct amdtp_stream *stream)
105{ 109{
106 stop_stream(efw, stream); 110 struct cmp_connection *conn;
107
108 amdtp_stream_destroy(stream);
109 111
110 if (stream == &efw->tx_stream) 112 if (stream == &efw->tx_stream)
111 cmp_connection_destroy(&efw->out_conn); 113 conn = &efw->out_conn;
112 else 114 else
113 cmp_connection_destroy(&efw->in_conn); 115 conn = &efw->in_conn;
116
117 amdtp_stream_destroy(stream);
118 cmp_connection_destroy(conn);
114} 119}
115 120
116static int 121static int
@@ -319,12 +324,8 @@ void snd_efw_stream_update_duplex(struct snd_efw *efw)
319 324
320void snd_efw_stream_destroy_duplex(struct snd_efw *efw) 325void snd_efw_stream_destroy_duplex(struct snd_efw *efw)
321{ 326{
322 mutex_lock(&efw->mutex);
323
324 destroy_stream(efw, &efw->rx_stream); 327 destroy_stream(efw, &efw->rx_stream);
325 destroy_stream(efw, &efw->tx_stream); 328 destroy_stream(efw, &efw->tx_stream);
326
327 mutex_unlock(&efw->mutex);
328} 329}
329 330
330void snd_efw_stream_lock_changed(struct snd_efw *efw) 331void snd_efw_stream_lock_changed(struct snd_efw *efw)
diff --git a/sound/firewire/oxfw/oxfw-stream.c b/sound/firewire/oxfw/oxfw-stream.c
index bda845afb470..29ccb3637164 100644
--- a/sound/firewire/oxfw/oxfw-stream.c
+++ b/sound/firewire/oxfw/oxfw-stream.c
@@ -337,6 +337,10 @@ void snd_oxfw_stream_stop_simplex(struct snd_oxfw *oxfw,
337 stop_stream(oxfw, stream); 337 stop_stream(oxfw, stream);
338} 338}
339 339
340/*
341 * This function should be called before starting the stream or after stopping
342 * the stream.
343 */
340void snd_oxfw_stream_destroy_simplex(struct snd_oxfw *oxfw, 344void snd_oxfw_stream_destroy_simplex(struct snd_oxfw *oxfw,
341 struct amdtp_stream *stream) 345 struct amdtp_stream *stream)
342{ 346{
@@ -347,8 +351,6 @@ void snd_oxfw_stream_destroy_simplex(struct snd_oxfw *oxfw,
347 else 351 else
348 conn = &oxfw->in_conn; 352 conn = &oxfw->in_conn;
349 353
350 stop_stream(oxfw, stream);
351
352 amdtp_stream_destroy(stream); 354 amdtp_stream_destroy(stream);
353 cmp_connection_destroy(conn); 355 cmp_connection_destroy(conn);
354} 356}
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index 60e5cad0531a..8c6ce019f437 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -104,11 +104,23 @@ end:
104 return err; 104 return err;
105} 105}
106 106
107/*
108 * This module releases the FireWire unit data after all ALSA character devices
109 * are released by applications. This is for releasing stream data or finishing
110 * transactions safely. Thus, when returning from .remove(), this module still
111 * keeps its references to the unit.
112 */
107static void oxfw_card_free(struct snd_card *card) 113static void oxfw_card_free(struct snd_card *card)
108{ 114{
109 struct snd_oxfw *oxfw = card->private_data; 115 struct snd_oxfw *oxfw = card->private_data;
110 unsigned int i; 116 unsigned int i;
111 117
118 snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
119 if (oxfw->has_output)
120 snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
121
122 fw_unit_put(oxfw->unit);
123
112 for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) { 124 for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) {
113 kfree(oxfw->tx_stream_formats[i]); 125 kfree(oxfw->tx_stream_formats[i]);
114 kfree(oxfw->rx_stream_formats[i]); 126 kfree(oxfw->rx_stream_formats[i]);
@@ -136,7 +148,7 @@ static int oxfw_probe(struct fw_unit *unit,
136 oxfw = card->private_data; 148 oxfw = card->private_data;
137 oxfw->card = card; 149 oxfw->card = card;
138 mutex_init(&oxfw->mutex); 150 mutex_init(&oxfw->mutex);
139 oxfw->unit = unit; 151 oxfw->unit = fw_unit_get(unit);
140 oxfw->device_info = (const struct device_info *)id->driver_data; 152 oxfw->device_info = (const struct device_info *)id->driver_data;
141 spin_lock_init(&oxfw->lock); 153 spin_lock_init(&oxfw->lock);
142 init_waitqueue_head(&oxfw->hwdep_wait); 154 init_waitqueue_head(&oxfw->hwdep_wait);
@@ -212,12 +224,7 @@ static void oxfw_remove(struct fw_unit *unit)
212{ 224{
213 struct snd_oxfw *oxfw = dev_get_drvdata(&unit->device); 225 struct snd_oxfw *oxfw = dev_get_drvdata(&unit->device);
214 226
215 snd_card_disconnect(oxfw->card); 227 /* No need to wait for releasing card object in this context. */
216
217 snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
218 if (oxfw->has_output)
219 snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
220
221 snd_card_free_when_closed(oxfw->card); 228 snd_card_free_when_closed(oxfw->card);
222} 229}
223 230
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index dfcb5e929f9f..a2ce773bdc62 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -961,7 +961,6 @@ static int azx_alloc_cmd_io(struct azx *chip)
961 dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n"); 961 dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
962 return err; 962 return err;
963} 963}
964EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
965 964
966static void azx_init_cmd_io(struct azx *chip) 965static void azx_init_cmd_io(struct azx *chip)
967{ 966{
@@ -1026,7 +1025,6 @@ static void azx_init_cmd_io(struct azx *chip)
1026 azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN); 1025 azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
1027 spin_unlock_irq(&chip->reg_lock); 1026 spin_unlock_irq(&chip->reg_lock);
1028} 1027}
1029EXPORT_SYMBOL_GPL(azx_init_cmd_io);
1030 1028
1031static void azx_free_cmd_io(struct azx *chip) 1029static void azx_free_cmd_io(struct azx *chip)
1032{ 1030{
@@ -1036,7 +1034,6 @@ static void azx_free_cmd_io(struct azx *chip)
1036 azx_writeb(chip, CORBCTL, 0); 1034 azx_writeb(chip, CORBCTL, 0);
1037 spin_unlock_irq(&chip->reg_lock); 1035 spin_unlock_irq(&chip->reg_lock);
1038} 1036}
1039EXPORT_SYMBOL_GPL(azx_free_cmd_io);
1040 1037
1041static unsigned int azx_command_addr(u32 cmd) 1038static unsigned int azx_command_addr(u32 cmd)
1042{ 1039{
@@ -1316,7 +1313,6 @@ static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
1316 else 1313 else
1317 return azx_corb_send_cmd(bus, val); 1314 return azx_corb_send_cmd(bus, val);
1318} 1315}
1319EXPORT_SYMBOL_GPL(azx_send_cmd);
1320 1316
1321/* get a response */ 1317/* get a response */
1322static unsigned int azx_get_response(struct hda_bus *bus, 1318static unsigned int azx_get_response(struct hda_bus *bus,
@@ -1330,7 +1326,6 @@ static unsigned int azx_get_response(struct hda_bus *bus,
1330 else 1326 else
1331 return azx_rirb_get_response(bus, addr); 1327 return azx_rirb_get_response(bus, addr);
1332} 1328}
1333EXPORT_SYMBOL_GPL(azx_get_response);
1334 1329
1335#ifdef CONFIG_SND_HDA_DSP_LOADER 1330#ifdef CONFIG_SND_HDA_DSP_LOADER
1336/* 1331/*
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 36d2f20db7a4..4ca3d5d02436 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1966,7 +1966,7 @@ static const struct pci_device_id azx_ids[] = {
1966 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM }, 1966 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
1967 /* Panther Point */ 1967 /* Panther Point */
1968 { PCI_DEVICE(0x8086, 0x1e20), 1968 { PCI_DEVICE(0x8086, 0x1e20),
1969 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, 1969 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
1970 /* Lynx Point */ 1970 /* Lynx Point */
1971 { PCI_DEVICE(0x8086, 0x8c20), 1971 { PCI_DEVICE(0x8086, 0x8c20),
1972 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, 1972 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index 227990bc02e3..375e94f4cf52 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -329,8 +329,8 @@ static int hda_tegra_init_chip(struct azx *chip, struct platform_device *pdev)
329 329
330 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 330 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
331 hda->regs = devm_ioremap_resource(dev, res); 331 hda->regs = devm_ioremap_resource(dev, res);
332 if (IS_ERR(chip->remap_addr)) 332 if (IS_ERR(hda->regs))
333 return PTR_ERR(chip->remap_addr); 333 return PTR_ERR(hda->regs);
334 334
335 chip->remap_addr = hda->regs + HDA_BAR0; 335 chip->remap_addr = hda->regs + HDA_BAR0;
336 chip->addr = res->start + HDA_BAR0; 336 chip->addr = res->start + HDA_BAR0;
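
Editor's note: the hda_tegra.c fix is a copy-paste repair — the code assigned hda->regs but then error-checked chip->remap_addr, which is only derived from hda->regs two lines later. Always test the value you just received; a trivial analogue:

#include <stdlib.h>

struct dev { char *regs; char *remap; };

static int map_regs(struct dev *d)
{
	d->regs = malloc(4096);
	if (!d->regs)		/* check d->regs, not the yet-unset d->remap */
		return -1;
	d->remap = d->regs + 0x80;
	return 0;
}
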
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index ddb93083a2af..b2b24a8b3dac 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4937,6 +4937,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4937 SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED), 4937 SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
4938 SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY), 4938 SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
4939 /* ALC282 */ 4939 /* ALC282 */
4940 SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4940 SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4941 SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4941 SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4942 SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4942 SND_PCI_QUIRK(0x103c, 0x2236, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED), 4943 SND_PCI_QUIRK(0x103c, 0x2236, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 6d36c5b78805..87eff3173ce9 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -79,6 +79,7 @@ enum {
79 STAC_ALIENWARE_M17X, 79 STAC_ALIENWARE_M17X,
80 STAC_92HD89XX_HP_FRONT_JACK, 80 STAC_92HD89XX_HP_FRONT_JACK,
81 STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK, 81 STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK,
82 STAC_92HD73XX_ASUS_MOBO,
82 STAC_92HD73XX_MODELS 83 STAC_92HD73XX_MODELS
83}; 84};
84 85
@@ -1911,7 +1912,18 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
1911 [STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = { 1912 [STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = {
1912 .type = HDA_FIXUP_PINS, 1913 .type = HDA_FIXUP_PINS,
1913 .v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs, 1914 .v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs,
1914 } 1915 },
1916 [STAC_92HD73XX_ASUS_MOBO] = {
1917 .type = HDA_FIXUP_PINS,
1918 .v.pins = (const struct hda_pintbl[]) {
1919 /* enable 5.1 and SPDIF out */
1920 { 0x0c, 0x01014411 },
1921 { 0x0d, 0x01014410 },
1922 { 0x0e, 0x01014412 },
1923 { 0x22, 0x014b1180 },
1924 { }
1925 }
1926 },
1915}; 1927};
1916 1928
1917static const struct hda_model_fixup stac92hd73xx_models[] = { 1929static const struct hda_model_fixup stac92hd73xx_models[] = {
@@ -1923,6 +1935,7 @@ static const struct hda_model_fixup stac92hd73xx_models[] = {
1923 { .id = STAC_DELL_M6_BOTH, .name = "dell-m6" }, 1935 { .id = STAC_DELL_M6_BOTH, .name = "dell-m6" },
1924 { .id = STAC_DELL_EQ, .name = "dell-eq" }, 1936 { .id = STAC_DELL_EQ, .name = "dell-eq" },
1925 { .id = STAC_ALIENWARE_M17X, .name = "alienware" }, 1937 { .id = STAC_ALIENWARE_M17X, .name = "alienware" },
1938 { .id = STAC_92HD73XX_ASUS_MOBO, .name = "asus-mobo" },
1926 {} 1939 {}
1927}; 1940};
1928 1941
@@ -1975,6 +1988,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
1975 "HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK), 1988 "HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK),
1976 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17, 1989 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
1977 "unknown HP", STAC_92HD89XX_HP_FRONT_JACK), 1990 "unknown HP", STAC_92HD89XX_HP_FRONT_JACK),
1991 SND_PCI_QUIRK(PCI_VENDOR_ID_ASUSTEK, 0x83f8, "ASUS AT4NM10",
1992 STAC_92HD73XX_ASUS_MOBO),
1978 {} /* terminator */ 1993 {} /* terminator */
1979}; 1994};
1980 1995
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 2c363fdca9fd..ca67f896d117 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -6082,6 +6082,9 @@ static int snd_hdspm_playback_open(struct snd_pcm_substream *substream)
6082 snd_pcm_hw_constraint_minmax(runtime, 6082 snd_pcm_hw_constraint_minmax(runtime,
6083 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 6083 SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
6084 64, 8192); 6084 64, 8192);
6085 snd_pcm_hw_constraint_minmax(runtime,
6086 SNDRV_PCM_HW_PARAM_PERIODS,
6087 2, 2);
6085 break; 6088 break;
6086 } 6089 }
6087 6090
@@ -6156,6 +6159,9 @@ static int snd_hdspm_capture_open(struct snd_pcm_substream *substream)
6156 snd_pcm_hw_constraint_minmax(runtime, 6159 snd_pcm_hw_constraint_minmax(runtime,
6157 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 6160 SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
6158 64, 8192); 6161 64, 8192);
6162 snd_pcm_hw_constraint_minmax(runtime,
6163 SNDRV_PCM_HW_PARAM_PERIODS,
6164 2, 2);
6159 break; 6165 break;
6160 } 6166 }
6161 6167
diff --git a/sound/soc/intel/sst-haswell-pcm.c b/sound/soc/intel/sst-haswell-pcm.c
index d6fa9d5514e1..7e21e8f85885 100644
--- a/sound/soc/intel/sst-haswell-pcm.c
+++ b/sound/soc/intel/sst-haswell-pcm.c
@@ -91,7 +91,8 @@ static const struct snd_pcm_hardware hsw_pcm_hardware = {
91 SNDRV_PCM_INFO_INTERLEAVED | 91 SNDRV_PCM_INFO_INTERLEAVED |
92 SNDRV_PCM_INFO_PAUSE | 92 SNDRV_PCM_INFO_PAUSE |
93 SNDRV_PCM_INFO_RESUME | 93 SNDRV_PCM_INFO_RESUME |
94 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP, 94 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP |
95 SNDRV_PCM_INFO_DRAIN_TRIGGER,
95 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | 96 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
96 SNDRV_PCM_FMTBIT_S32_LE, 97 SNDRV_PCM_FMTBIT_S32_LE,
97 .period_bytes_min = PAGE_SIZE, 98 .period_bytes_min = PAGE_SIZE,
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index 4864392bfcba..c9917ca5de1a 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -151,7 +151,7 @@ static int dmaengine_pcm_set_runtime_hwparams(struct snd_pcm_substream *substrea
151 hw.info |= SNDRV_PCM_INFO_BATCH; 151 hw.info |= SNDRV_PCM_INFO_BATCH;
152 152
153 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 153 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
154 addr_widths = dma_caps.dstn_addr_widths; 154 addr_widths = dma_caps.dst_addr_widths;
155 else 155 else
156 addr_widths = dma_caps.src_addr_widths; 156 addr_widths = dma_caps.src_addr_widths;
157 } 157 }
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 03fed6611d9e..2ed260b10f6d 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -303,6 +303,11 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface,
303 return err; 303 return err;
304 } 304 }
305 305
306 /* Don't check the sample rate for devices which we know don't
307 * support reading */
308 if (snd_usb_get_sample_rate_quirk(chip))
309 return 0;
310
306 if ((err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC_GET_CUR, 311 if ((err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC_GET_CUR,
307 USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_IN, 312 USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_IN,
308 UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep, 313 UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep,
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 99b63a7902f3..81b7da8e56d3 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -302,14 +302,17 @@ static void line6_data_received(struct urb *urb)
302/* 302/*
303 Read data from device. 303 Read data from device.
304*/ 304*/
305int line6_read_data(struct usb_line6 *line6, int address, void *data, 305int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
306 size_t datalen) 306 unsigned datalen)
307{ 307{
308 struct usb_device *usbdev = line6->usbdev; 308 struct usb_device *usbdev = line6->usbdev;
309 int ret; 309 int ret;
310 unsigned char len; 310 unsigned char len;
311 unsigned count; 311 unsigned count;
312 312
313 if (address > 0xffff || datalen > 0xff)
314 return -EINVAL;
315
313 /* query the serial number: */ 316 /* query the serial number: */
314 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67, 317 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
315 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 318 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
@@ -370,14 +373,17 @@ EXPORT_SYMBOL_GPL(line6_read_data);
370/* 373/*
371 Write data to device. 374 Write data to device.
372*/ 375*/
373int line6_write_data(struct usb_line6 *line6, int address, void *data, 376int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
374 size_t datalen) 377 unsigned datalen)
375{ 378{
376 struct usb_device *usbdev = line6->usbdev; 379 struct usb_device *usbdev = line6->usbdev;
377 int ret; 380 int ret;
378 unsigned char status; 381 unsigned char status;
379 int count; 382 int count;
380 383
384 if (address > 0xffff || datalen > 0xffff)
385 return -EINVAL;
386
381 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67, 387 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
382 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 388 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
383 0x0022, address, data, datalen, 389 0x0022, address, data, datalen,
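
Editor's note: line6_read_data()/line6_write_data() now take unsigned arguments and reject values that do not fit the wire format before issuing the control transfer, since address and length are packed into 16-bit (and, for reads, 8-bit) USB fields. Sketch of the guard (control_write() is an illustrative stand-in for the transfer):

#include <stdint.h>

static int control_write(uint16_t value, uint16_t index)
{
	(void)value; (void)index;
	return 0;
}

static int write_data(unsigned address, unsigned datalen)
{
	/* Both fields travel as 16-bit quantities; refuse what would truncate. */
	if (address > 0xffff || datalen > 0xffff)
		return -1;
	return control_write((uint16_t)datalen, (uint16_t)address);
}
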
diff --git a/sound/usb/line6/driver.h b/sound/usb/line6/driver.h
index 5d20294d64f4..7da643e79e3b 100644
--- a/sound/usb/line6/driver.h
+++ b/sound/usb/line6/driver.h
@@ -147,8 +147,8 @@ struct usb_line6 {
147 147
148extern char *line6_alloc_sysex_buffer(struct usb_line6 *line6, int code1, 148extern char *line6_alloc_sysex_buffer(struct usb_line6 *line6, int code1,
149 int code2, int size); 149 int code2, int size);
150extern int line6_read_data(struct usb_line6 *line6, int address, void *data, 150extern int line6_read_data(struct usb_line6 *line6, unsigned address,
151 size_t datalen); 151 void *data, unsigned datalen);
152extern int line6_read_serial_number(struct usb_line6 *line6, 152extern int line6_read_serial_number(struct usb_line6 *line6,
153 u32 *serial_number); 153 u32 *serial_number);
154extern int line6_send_raw_message_async(struct usb_line6 *line6, 154extern int line6_send_raw_message_async(struct usb_line6 *line6,
@@ -161,8 +161,8 @@ extern void line6_start_timer(struct timer_list *timer, unsigned long msecs,
161 void (*function)(unsigned long), 161 void (*function)(unsigned long),
162 unsigned long data); 162 unsigned long data);
163extern int line6_version_request_async(struct usb_line6 *line6); 163extern int line6_version_request_async(struct usb_line6 *line6);
164extern int line6_write_data(struct usb_line6 *line6, int address, void *data, 164extern int line6_write_data(struct usb_line6 *line6, unsigned address,
165 size_t datalen); 165 void *data, unsigned datalen);
166 166
167int line6_probe(struct usb_interface *interface, 167int line6_probe(struct usb_interface *interface,
168 const struct usb_device_id *id, 168 const struct usb_device_id *id,
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index a7398412310b..753a47de8459 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1111,6 +1111,11 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
1111 } 1111 }
1112} 1112}
1113 1113
1114bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
1115{
1116 /* MS Lifecam HD-5000 doesn't support reading the sample rate. */
1117 return chip->usb_id == USB_ID(0x045E, 0x076D);
1118}
1114 1119
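
USB_ID() packs the vendor and product IDs into a single u32, which is what lets quirks like the one above be a single comparison. A small illustration, assuming the macro mirrors its usual definition in sound/usb/usbaudio.h:

#include <assert.h>
#include <stdint.h>

/* Assumed to mirror the definition in sound/usb/usbaudio.h. */
#define USB_ID(vendor, product) (((uint32_t)(vendor) << 16) | (product))

int main(void)
{
	/* The MS Lifecam HD-5000 entry from the quirk above. */
	assert(USB_ID(0x045E, 0x076D) == 0x045E076Du);
	return 0;
}
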
1115/* Marantz/Denon USB DACs need a vendor cmd to switch 1120/* Marantz/Denon USB DACs need a vendor cmd to switch
1116 * between PCM and native DSD mode 1121 * between PCM and native DSD mode
@@ -1122,6 +1127,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
1122 int err; 1127 int err;
1123 1128
1124 switch (subs->stream->chip->usb_id) { 1129 switch (subs->stream->chip->usb_id) {
1130 case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
1125 case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */ 1131 case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
1126 case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */ 1132 case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
1127 1133
@@ -1201,6 +1207,7 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
1201 (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) { 1207 (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) {
1202 1208
1203 switch (le16_to_cpu(dev->descriptor.idProduct)) { 1209 switch (le16_to_cpu(dev->descriptor.idProduct)) {
1210 case 0x1003: /* Denon DA-300USB */
1204 case 0x3005: /* Marantz HD-DAC1 */ 1211 case 0x3005: /* Marantz HD-DAC1 */
1205 case 0x3006: /* Marantz SA-14S1 */ 1212 case 0x3006: /* Marantz SA-14S1 */
1206 mdelay(20); 1213 mdelay(20);
@@ -1262,6 +1269,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
1262 1269
1263 /* Denon/Marantz devices with USB DAC functionality */ 1270 /* Denon/Marantz devices with USB DAC functionality */
1264 switch (chip->usb_id) { 1271 switch (chip->usb_id) {
1272 case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
1265 case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */ 1273 case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
1266 case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */ 1274 case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
1267 if (fp->altsetting == 2) 1275 if (fp->altsetting == 2)
diff --git a/sound/usb/quirks.h b/sound/usb/quirks.h
index 1b862386577d..2cd71ed1201f 100644
--- a/sound/usb/quirks.h
+++ b/sound/usb/quirks.h
@@ -21,6 +21,8 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev,
21void snd_usb_set_format_quirk(struct snd_usb_substream *subs, 21void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
22 struct audioformat *fmt); 22 struct audioformat *fmt);
23 23
24bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip);
25
24int snd_usb_is_big_endian_format(struct snd_usb_audio *chip, 26int snd_usb_is_big_endian_format(struct snd_usb_audio *chip,
25 struct audioformat *fp); 27 struct audioformat *fp);
26 28
diff --git a/tools/lguest/Makefile b/tools/lguest/Makefile
index 97bca4871ea3..a107b5e4da13 100644
--- a/tools/lguest/Makefile
+++ b/tools/lguest/Makefile
@@ -1,7 +1,13 @@
1# This creates the demonstration utility "lguest" which runs a Linux guest. 1# This creates the demonstration utility "lguest" which runs a Linux guest.
2CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -U_FORTIFY_SOURCE 2CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -U_FORTIFY_SOURCE -Iinclude
3 3
4all: lguest 4all: lguest
5 5
6include/linux/virtio_types.h: ../../include/uapi/linux/virtio_types.h
7 mkdir -p include/linux 2>&1 || true
8 ln -sf ../../../../include/uapi/linux/virtio_types.h $@
9
10lguest: include/linux/virtio_types.h
11
6clean: 12clean:
7 rm -f lguest 13 rm -f lguest
diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c
index 32cf2ce15d69..e44052483ed9 100644
--- a/tools/lguest/lguest.c
+++ b/tools/lguest/lguest.c
@@ -41,6 +41,8 @@
41#include <signal.h> 41#include <signal.h>
42#include <pwd.h> 42#include <pwd.h>
43#include <grp.h> 43#include <grp.h>
44#include <sys/user.h>
45#include <linux/pci_regs.h>
44 46
45#ifndef VIRTIO_F_ANY_LAYOUT 47#ifndef VIRTIO_F_ANY_LAYOUT
46#define VIRTIO_F_ANY_LAYOUT 27 48#define VIRTIO_F_ANY_LAYOUT 27
@@ -61,12 +63,19 @@ typedef uint16_t u16;
61typedef uint8_t u8; 63typedef uint8_t u8;
62/*:*/ 64/*:*/
63 65
64#include <linux/virtio_config.h> 66#define VIRTIO_CONFIG_NO_LEGACY
65#include <linux/virtio_net.h> 67#define VIRTIO_PCI_NO_LEGACY
66#include <linux/virtio_blk.h> 68#define VIRTIO_BLK_NO_LEGACY
67#include <linux/virtio_console.h> 69#define VIRTIO_NET_NO_LEGACY
68#include <linux/virtio_rng.h> 70
71/* Use the in-kernel ones, which define VIRTIO_F_VERSION_1 */
72#include "../../include/uapi/linux/virtio_config.h"
73#include "../../include/uapi/linux/virtio_net.h"
74#include "../../include/uapi/linux/virtio_blk.h"
75#include "../../include/uapi/linux/virtio_console.h"
76#include "../../include/uapi/linux/virtio_rng.h"
69#include <linux/virtio_ring.h> 77#include <linux/virtio_ring.h>
78#include "../../include/uapi/linux/virtio_pci.h"
70#include <asm/bootparam.h> 79#include <asm/bootparam.h>
71#include "../../include/linux/lguest_launcher.h" 80#include "../../include/linux/lguest_launcher.h"
72 81
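
The *_NO_LEGACY defines above make the UAPI headers hide their pre-virtio-1.0 definitions, so any stray use of a legacy name fails at compile time instead of compiling silently. A generic, runnable sketch of that guard pattern (the DEMO_* names are made up for illustration):

#include <stdio.h>

/* The header side: legacy names exist only when the guard is absent. */
#ifndef DEMO_NO_LEGACY
#define DEMO_LEGACY_REGISTER 0x10	/* pre-1.0 layout, hidden when guarded */
#endif

int main(void)
{
#ifdef DEMO_LEGACY_REGISTER
	printf("legacy definitions visible\n");
#else
	printf("legacy definitions compiled out\n");
#endif
	return 0;
}
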
@@ -91,13 +100,16 @@ static bool verbose;
91/* The pointer to the start of guest memory. */ 100/* The pointer to the start of guest memory. */
92static void *guest_base; 101static void *guest_base;
93/* The maximum guest physical address allowed, and maximum possible. */ 102/* The maximum guest physical address allowed, and maximum possible. */
94static unsigned long guest_limit, guest_max; 103static unsigned long guest_limit, guest_max, guest_mmio;
95/* The /dev/lguest file descriptor. */ 104/* The /dev/lguest file descriptor. */
96static int lguest_fd; 105static int lguest_fd;
97 106
98/* a per-cpu variable indicating whose vcpu is currently running */ 107/* a per-cpu variable indicating whose vcpu is currently running */
99static unsigned int __thread cpu_id; 108static unsigned int __thread cpu_id;
100 109
110/* 5-bit device number in PCI_CONFIG_ADDR => at most 32 devices */
111#define MAX_PCI_DEVICES 32
112
101/* This is our list of devices. */ 113/* This is our list of devices. */
102struct device_list { 114struct device_list {
103 /* Counter to assign interrupt numbers. */ 115 /* Counter to assign interrupt numbers. */
@@ -106,30 +118,50 @@ struct device_list {
106 /* Counter to print out convenient device numbers. */ 118 /* Counter to print out convenient device numbers. */
107 unsigned int device_num; 119 unsigned int device_num;
108 120
109 /* The descriptor page for the devices. */ 121 /* PCI devices. */
110 u8 *descpage; 122 struct device *pci[MAX_PCI_DEVICES];
111
112 /* A single linked list of devices. */
113 struct device *dev;
114 /* And a pointer to the last device for easy append. */
115 struct device *lastdev;
116}; 123};
117 124
118/* The list of Guest devices, based on command line arguments. */ 125/* The list of Guest devices, based on command line arguments. */
119static struct device_list devices; 126static struct device_list devices;
120 127
121/* The device structure describes a single device. */ 128struct virtio_pci_cfg_cap {
122struct device { 129 struct virtio_pci_cap cap;
123 /* The linked-list pointer. */ 130 u32 pci_cfg_data; /* Data for BAR access. */
124 struct device *next; 131};
125 132
126 /* The device's descriptor, as mapped into the Guest. */ 133struct virtio_pci_mmio {
127 struct lguest_device_desc *desc; 134 struct virtio_pci_common_cfg cfg;
135 u16 notify;
136 u8 isr;
137 u8 padding;
138 /* Device-specific configuration follows this. */
139};
128 140
129 /* We can't trust desc values once Guest has booted: we use these. */ 141/* This is the layout (little-endian) of the PCI config space. */
130 unsigned int feature_len; 142struct pci_config {
131 unsigned int num_vq; 143 u16 vendor_id, device_id;
144 u16 command, status;
145 u8 revid, prog_if, subclass, class;
146 u8 cacheline_size, lat_timer, header_type, bist;
147 u32 bar[6];
148 u32 cardbus_cis_ptr;
149 u16 subsystem_vendor_id, subsystem_device_id;
150 u32 expansion_rom_addr;
151 u8 capabilities, reserved1[3];
152 u32 reserved2;
153 u8 irq_line, irq_pin, min_grant, max_latency;
154
155 /* Now, this is the linked capability list. */
156 struct virtio_pci_cap common;
157 struct virtio_pci_notify_cap notify;
158 struct virtio_pci_cap isr;
159 struct virtio_pci_cap device;
160 struct virtio_pci_cfg_cap cfg_access;
161};
132 162
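
Because struct pci_config is exposed to the Guest as raw configuration space, its field offsets have to coincide with the standard PCI header layout. A compile-time check one could write for it (a sketch, not part of the patch; the capability-list tail is omitted, and the expected values are the standard PCI_VENDOR_ID/PCI_COMMAND/PCI_BASE_ADDRESS_0/PCI_CAPABILITY_LIST/PCI_INTERRUPT_LINE offsets from linux/pci_regs.h, inlined here):

#include <stddef.h>
#include <stdint.h>

/* Local copy of struct pci_config from the hunk above. */
struct pci_config {
	uint16_t vendor_id, device_id;
	uint16_t command, status;
	uint8_t revid, prog_if, subclass, class;
	uint8_t cacheline_size, lat_timer, header_type, bist;
	uint32_t bar[6];
	uint32_t cardbus_cis_ptr;
	uint16_t subsystem_vendor_id, subsystem_device_id;
	uint32_t expansion_rom_addr;
	uint8_t capabilities, reserved1[3];
	uint32_t reserved2;
	uint8_t irq_line, irq_pin, min_grant, max_latency;
};

/* C11 checks against the standard config-header offsets. */
_Static_assert(offsetof(struct pci_config, vendor_id) == 0x00, "PCI_VENDOR_ID");
_Static_assert(offsetof(struct pci_config, command) == 0x04, "PCI_COMMAND");
_Static_assert(offsetof(struct pci_config, bar) == 0x10, "PCI_BASE_ADDRESS_0");
_Static_assert(offsetof(struct pci_config, capabilities) == 0x34, "PCI_CAPABILITY_LIST");
_Static_assert(offsetof(struct pci_config, irq_line) == 0x3c, "PCI_INTERRUPT_LINE");

int main(void) { return 0; }
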
163/* The device structure describes a single device. */
164struct device {
133 /* The name of this device, for --verbose. */ 165 /* The name of this device, for --verbose. */
134 const char *name; 166 const char *name;
135 167
@@ -139,6 +171,25 @@ struct device {
139 /* Is it operational */ 171 /* Is it operational */
140 bool running; 172 bool running;
141 173
174 /* Has it written FEATURES_OK but not re-checked it? */
175 bool wrote_features_ok;
176
177 /* PCI configuration */
178 union {
179 struct pci_config config;
180 u32 config_words[sizeof(struct pci_config) / sizeof(u32)];
181 };
182
183 /* Features we offer, and those accepted. */
184 u64 features, features_accepted;
185
186 /* Device-specific config hangs off the end of this. */
187 struct virtio_pci_mmio *mmio;
188
189 /* PCI MMIO resources (all in BAR0) */
190 size_t mmio_size;
191 u32 mmio_addr;
192
142 /* Device-specific data. */ 193 /* Device-specific data. */
143 void *priv; 194 void *priv;
144}; 195};
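
The config/config_words union in struct device is what lets the port emulation treat configuration space as an array of 32-bit registers while the named fields stay readable. A scaled-down, runnable illustration of that aliasing:

#include <stdint.h>
#include <stdio.h>

/* Scaled-down model of the config/config_words union in struct device. */
struct cfg { uint16_t vendor_id, device_id; uint32_t bar0; };

union {
	struct cfg config;
	uint32_t config_words[sizeof(struct cfg) / sizeof(uint32_t)];
} d;

int main(void)
{
	d.config_words[1] = 0xFFFFFFFF;		/* word index 1 overlays bar0 */
	printf("bar0 = %#x\n", d.config.bar0);	/* prints 0xffffffff */
	return 0;
}
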
@@ -150,12 +201,15 @@ struct virtqueue {
150 /* Which device owns me. */ 201 /* Which device owns me. */
151 struct device *dev; 202 struct device *dev;
152 203
153 /* The configuration for this queue. */ 204 /* Name for printing errors. */
154 struct lguest_vqconfig config; 205 const char *name;
155 206
156 /* The actual ring of buffers. */ 207 /* The actual ring of buffers. */
157 struct vring vring; 208 struct vring vring;
158 209
210 /* The information about this virtqueue (we only use queue_size from it) */
211 struct virtio_pci_common_cfg pci_config;
212
159 /* Last available index we saw. */ 213 /* Last available index we saw. */
160 u16 last_avail_idx; 214 u16 last_avail_idx;
161 215
@@ -199,6 +253,16 @@ static struct termios orig_term;
199#define le32_to_cpu(v32) (v32) 253#define le32_to_cpu(v32) (v32)
200#define le64_to_cpu(v64) (v64) 254#define le64_to_cpu(v64) (v64)
201 255
256/*
257 * A real device would ignore weird/non-compliant driver behaviour. We
258 * stop and flag it, to help debug Linux problems.
259 */
260#define bad_driver(d, fmt, ...) \
261 errx(1, "%s: bad driver: " fmt, (d)->name, ## __VA_ARGS__)
262#define bad_driver_vq(vq, fmt, ...) \
263 errx(1, "%s vq %s: bad driver: " fmt, (vq)->dev->name, \
264 vq->name, ## __VA_ARGS__)
265
202/* Is this iovec empty? */ 266/* Is this iovec empty? */
203static bool iov_empty(const struct iovec iov[], unsigned int num_iov) 267static bool iov_empty(const struct iovec iov[], unsigned int num_iov)
204{ 268{
@@ -211,7 +275,8 @@ static bool iov_empty(const struct iovec iov[], unsigned int num_iov)
211} 275}
212 276
213/* Take len bytes from the front of this iovec. */ 277/* Take len bytes from the front of this iovec. */
214static void iov_consume(struct iovec iov[], unsigned num_iov, 278static void iov_consume(struct device *d,
279 struct iovec iov[], unsigned num_iov,
215 void *dest, unsigned len) 280 void *dest, unsigned len)
216{ 281{
217 unsigned int i; 282 unsigned int i;
@@ -229,14 +294,7 @@ static void iov_consume(struct iovec iov[], unsigned num_iov,
229 len -= used; 294 len -= used;
230 } 295 }
231 if (len != 0) 296 if (len != 0)
232 errx(1, "iovec too short!"); 297 bad_driver(d, "iovec too short!");
233}
234
235/* The device virtqueue descriptors are followed by feature bitmasks. */
236static u8 *get_feature_bits(struct device *dev)
237{
238 return (u8 *)(dev->desc + 1)
239 + dev->num_vq * sizeof(struct lguest_vqconfig);
240} 298}
241 299
242/*L:100 300/*L:100
@@ -309,14 +367,20 @@ static void *map_zeroed_pages(unsigned int num)
309 return addr + getpagesize(); 367 return addr + getpagesize();
310} 368}
311 369
312/* Get some more pages for a device. */ 370/* Get some bytes which won't be mapped into the guest. */
313static void *get_pages(unsigned int num) 371static unsigned long get_mmio_region(size_t size)
314{ 372{
315 void *addr = from_guest_phys(guest_limit); 373 unsigned long addr = guest_mmio;
374 size_t i;
375
376 if (!size)
377 return addr;
378
379 /* Size has to be a power of 2 (and multiple of 16) */
380 for (i = 1; i < size; i <<= 1);
381
382 guest_mmio += i;
316 383
317 guest_limit += num * getpagesize();
318 if (guest_limit > guest_max)
319 errx(1, "Not enough memory for devices");
320 return addr; 384 return addr;
321} 385}
322 386
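
The rounding loop in get_mmio_region() bumps each allocation to the next power of two so that the all-ones BAR sizing trick (seen later in pci_data_iowrite()) yields a clean size. A standalone check of what the loop computes:

#include <assert.h>
#include <stddef.h>

/* The same rounding loop as get_mmio_region(). */
static size_t round_up_pow2(size_t size)
{
	size_t i;

	for (i = 1; i < size; i <<= 1)
		;
	return i;
}

int main(void)
{
	assert(round_up_pow2(68) == 128);	/* a 68-byte region occupies 128 */
	assert(round_up_pow2(128) == 128);	/* exact powers of 2 unchanged */
	return 0;
}
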
@@ -547,9 +611,11 @@ static void tell_kernel(unsigned long start)
547{ 611{
548 unsigned long args[] = { LHREQ_INITIALIZE, 612 unsigned long args[] = { LHREQ_INITIALIZE,
549 (unsigned long)guest_base, 613 (unsigned long)guest_base,
550 guest_limit / getpagesize(), start }; 614 guest_limit / getpagesize(), start,
551 verbose("Guest: %p - %p (%#lx)\n", 615 (guest_mmio+getpagesize()-1) / getpagesize() };
552 guest_base, guest_base + guest_limit, guest_limit); 616 verbose("Guest: %p - %p (%#lx, MMIO %#lx)\n",
617 guest_base, guest_base + guest_limit,
618 guest_limit, guest_mmio);
553 lguest_fd = open_or_die("/dev/lguest", O_RDWR); 619 lguest_fd = open_or_die("/dev/lguest", O_RDWR);
554 if (write(lguest_fd, args, sizeof(args)) < 0) 620 if (write(lguest_fd, args, sizeof(args)) < 0)
555 err(1, "Writing to /dev/lguest"); 621 err(1, "Writing to /dev/lguest");
@@ -564,7 +630,8 @@ static void tell_kernel(unsigned long start)
564 * we have a convenient routine which checks it and exits with an error message 630 * we have a convenient routine which checks it and exits with an error message
565 * if something funny is going on: 631 * if something funny is going on:
566 */ 632 */
567static void *_check_pointer(unsigned long addr, unsigned int size, 633static void *_check_pointer(struct device *d,
634 unsigned long addr, unsigned int size,
568 unsigned int line) 635 unsigned int line)
569{ 636{
570 /* 637 /*
@@ -572,7 +639,8 @@ static void *_check_pointer(unsigned long addr, unsigned int size,
572 * or addr + size wraps around. 639 * or addr + size wraps around.
573 */ 640 */
574 if ((addr + size) > guest_limit || (addr + size) < addr) 641 if ((addr + size) > guest_limit || (addr + size) < addr)
575 errx(1, "%s:%i: Invalid address %#lx", __FILE__, line, addr); 642 bad_driver(d, "%s:%i: Invalid address %#lx",
643 __FILE__, line, addr);
576 /* 644 /*
577 * We return a pointer for the caller's convenience, now we know it's 645 * We return a pointer for the caller's convenience, now we know it's
578 * safe to use. 646 * safe to use.
@@ -580,14 +648,14 @@ static void *_check_pointer(unsigned long addr, unsigned int size,
580 return from_guest_phys(addr); 648 return from_guest_phys(addr);
581} 649}
582/* A macro which transparently hands the line number to the real function. */ 650/* A macro which transparently hands the line number to the real function. */
583#define check_pointer(addr,size) _check_pointer(addr, size, __LINE__) 651#define check_pointer(d,addr,size) _check_pointer(d, addr, size, __LINE__)
584 652
585/* 653/*
586 * Each buffer in the virtqueues is actually a chain of descriptors. This 654 * Each buffer in the virtqueues is actually a chain of descriptors. This
587 * function returns the next descriptor in the chain, or vq->vring.num if we're 655 * function returns the next descriptor in the chain, or vq->vring.num if we're
588 * at the end. 656 * at the end.
589 */ 657 */
590static unsigned next_desc(struct vring_desc *desc, 658static unsigned next_desc(struct device *d, struct vring_desc *desc,
591 unsigned int i, unsigned int max) 659 unsigned int i, unsigned int max)
592{ 660{
593 unsigned int next; 661 unsigned int next;
@@ -602,7 +670,7 @@ static unsigned next_desc(struct vring_desc *desc,
602 wmb(); 670 wmb();
603 671
604 if (next >= max) 672 if (next >= max)
605 errx(1, "Desc next is %u", next); 673 bad_driver(d, "Desc next is %u", next);
606 674
607 return next; 675 return next;
608} 676}
@@ -613,21 +681,48 @@ static unsigned next_desc(struct vring_desc *desc,
613 */ 681 */
614static void trigger_irq(struct virtqueue *vq) 682static void trigger_irq(struct virtqueue *vq)
615{ 683{
616 unsigned long buf[] = { LHREQ_IRQ, vq->config.irq }; 684 unsigned long buf[] = { LHREQ_IRQ, vq->dev->config.irq_line };
617 685
618 /* Don't inform them if nothing used. */ 686 /* Don't inform them if nothing used. */
619 if (!vq->pending_used) 687 if (!vq->pending_used)
620 return; 688 return;
621 vq->pending_used = 0; 689 vq->pending_used = 0;
622 690
623 /* If they don't want an interrupt, don't send one... */ 691 /*
692 * 2.4.7.1:
693 *
694 * If the VIRTIO_F_EVENT_IDX feature bit is not negotiated:
695 * The driver MUST set flags to 0 or 1.
696 */
697 if (vq->vring.avail->flags > 1)
698 bad_driver_vq(vq, "avail->flags = %u\n", vq->vring.avail->flags);
699
700 /*
701 * 2.4.7.2:
702 *
703 * If the VIRTIO_F_EVENT_IDX feature bit is not negotiated:
704 *
705 * - The device MUST ignore the used_event value.
706 * - After the device writes a descriptor index into the used ring:
707 * - If flags is 1, the device SHOULD NOT send an interrupt.
708 * - If flags is 0, the device MUST send an interrupt.
709 */
624 if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) { 710 if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
625 return; 711 return;
626 } 712 }
627 713
714 /*
715 * 4.1.4.5.1:
716 *
717 * If MSI-X capability is disabled, the device MUST set the Queue
718 * Interrupt bit in ISR status before sending a virtqueue notification
719 * to the driver.
720 */
721 vq->dev->mmio->isr = 0x1;
722
628 /* Send the Guest an interrupt to tell them we used something up. */ 723
629 if (write(lguest_fd, buf, sizeof(buf)) != 0) 724 if (write(lguest_fd, buf, sizeof(buf)) != 0)
630 err(1, "Triggering irq %i", vq->config.irq); 725 err(1, "Triggering irq %i", vq->dev->config.irq_line);
631} 726}
632 727
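
The ISR byte set by trigger_irq() is read-to-clear on the virtio-pci transport: the driver's read both fetches and acknowledges it. A hedged device-side sketch of that behaviour (the real handling lives in emulate_mmio_read(), which is outside this hunk):

#include <stdint.h>

/*
 * Sketch: a single ISR read reports the pending bits and clears them,
 * acknowledging the interrupt raised by trigger_irq() above.
 */
static uint8_t isr_read(uint8_t *isr)
{
	uint8_t val = *isr;	/* bit 0: queue interrupt */

	*isr = 0;		/* the read acknowledges it */
	return val;
}

int main(void)
{
	uint8_t isr = 0x1;

	return isr_read(&isr) == 0x1 && isr == 0 ? 0 : 1;
}
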
633/* 728/*
@@ -646,6 +741,14 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
646 struct vring_desc *desc; 741 struct vring_desc *desc;
647 u16 last_avail = lg_last_avail(vq); 742 u16 last_avail = lg_last_avail(vq);
648 743
744 /*
745 * 2.4.7.1:
746 *
747 * The driver MUST handle spurious interrupts from the device.
748 *
749 * That's why this is a while loop.
750 */
751
649 /* There's nothing available? */ 752 /* There's nothing available? */
650 while (last_avail == vq->vring.avail->idx) { 753 while (last_avail == vq->vring.avail->idx) {
651 u64 event; 754 u64 event;
@@ -679,8 +782,8 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
679 782
680 /* Check it isn't doing very strange things with descriptor numbers. */ 783 /* Check it isn't doing very strange things with descriptor numbers. */
681 if ((u16)(vq->vring.avail->idx - last_avail) > vq->vring.num) 784 if ((u16)(vq->vring.avail->idx - last_avail) > vq->vring.num)
682 errx(1, "Guest moved used index from %u to %u", 785 bad_driver_vq(vq, "Guest moved used index from %u to %u",
683 last_avail, vq->vring.avail->idx); 786 last_avail, vq->vring.avail->idx);
684 787
685 /* 788 /*
686 * Make sure we read the descriptor number *after* we read the ring 789 * Make sure we read the descriptor number *after* we read the ring
@@ -697,7 +800,7 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
697 800
698 /* If their number is silly, that's a fatal mistake. */ 801 /* If their number is silly, that's a fatal mistake. */
699 if (head >= vq->vring.num) 802 if (head >= vq->vring.num)
700 errx(1, "Guest says index %u is available", head); 803 bad_driver_vq(vq, "Guest says index %u is available", head);
701 804
702 /* When we start there are none of either input nor output. */ 805 /* When we start there are none of either input nor output. */
703 *out_num = *in_num = 0; 806 *out_num = *in_num = 0;
@@ -712,24 +815,73 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
712 * that: no rmb() required. 815 * that: no rmb() required.
713 */ 816 */
714 817
715 /* 818 do {
716 * If this is an indirect entry, then this buffer contains a descriptor 819 /*
717 * table which we handle as if it's any normal descriptor chain. 820 * If this is an indirect entry, then this buffer contains a
718 */ 821 * descriptor table which we handle as if it's any normal
719 if (desc[i].flags & VRING_DESC_F_INDIRECT) { 822 * descriptor chain.
720 if (desc[i].len % sizeof(struct vring_desc)) 823 */
721 errx(1, "Invalid size for indirect buffer table"); 824 if (desc[i].flags & VRING_DESC_F_INDIRECT) {
825 /* 2.4.5.3.1:
826 *
827 * The driver MUST NOT set the VIRTQ_DESC_F_INDIRECT
828 * flag unless the VIRTIO_F_INDIRECT_DESC feature was
829 * negotiated.
830 */
831 if (!(vq->dev->features_accepted &
832 (1<<VIRTIO_RING_F_INDIRECT_DESC)))
833 bad_driver_vq(vq, "vq indirect not negotiated");
722 834
723 max = desc[i].len / sizeof(struct vring_desc); 835 /*
724 desc = check_pointer(desc[i].addr, desc[i].len); 836 * 2.4.5.3.1:
725 i = 0; 837 *
726 } 838 * The driver MUST NOT set the VIRTQ_DESC_F_INDIRECT
839 * flag within an indirect descriptor (ie. only one
840 * table per descriptor).
841 */
842 if (desc != vq->vring.desc)
843 bad_driver_vq(vq, "Indirect within indirect");
844
845 /*
846 * Proposed update VIRTIO-134 spells this out:
847 *
848 * A driver MUST NOT set both VIRTQ_DESC_F_INDIRECT
849 * and VIRTQ_DESC_F_NEXT in flags.
850 */
851 if (desc[i].flags & VRING_DESC_F_NEXT)
852 bad_driver_vq(vq, "indirect and next together");
853
854 if (desc[i].len % sizeof(struct vring_desc))
855 bad_driver_vq(vq,
856 "Invalid size for indirect table");
857 /*
858 * 2.4.5.3.2:
859 *
860 * The device MUST ignore the write-only flag
861 * (flags&VIRTQ_DESC_F_WRITE) in the descriptor that
862 * refers to an indirect table.
863 *
864 * We ignore it here: :)
865 */
866
867 max = desc[i].len / sizeof(struct vring_desc);
868 desc = check_pointer(vq->dev, desc[i].addr, desc[i].len);
869 i = 0;
870
871 /* 2.4.5.3.1:
872 *
873 * A driver MUST NOT create a descriptor chain longer
874 * than the Queue Size of the device.
875 */
876 if (max > vq->pci_config.queue_size)
877 bad_driver_vq(vq,
878 "indirect has too many entries");
879 }
727 880
728 do {
729 /* Grab the first descriptor, and check it's OK. */ 881 /* Grab the first descriptor, and check it's OK. */
730 iov[*out_num + *in_num].iov_len = desc[i].len; 882 iov[*out_num + *in_num].iov_len = desc[i].len;
731 iov[*out_num + *in_num].iov_base 883 iov[*out_num + *in_num].iov_base
732 = check_pointer(desc[i].addr, desc[i].len); 884 = check_pointer(vq->dev, desc[i].addr, desc[i].len);
733 /* If this is an input descriptor, increment that count. */ 885 /* If this is an input descriptor, increment that count. */
734 if (desc[i].flags & VRING_DESC_F_WRITE) 886 if (desc[i].flags & VRING_DESC_F_WRITE)
735 (*in_num)++; 887 (*in_num)++;
@@ -739,14 +891,15 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
739 * to come before any input descriptors. 891 * to come before any input descriptors.
740 */ 892 */
741 if (*in_num) 893 if (*in_num)
742 errx(1, "Descriptor has out after in"); 894 bad_driver_vq(vq,
895 "Descriptor has out after in");
743 (*out_num)++; 896 (*out_num)++;
744 } 897 }
745 898
746 /* If we've got too many, that implies a descriptor loop. */ 899 /* If we've got too many, that implies a descriptor loop. */
747 if (*out_num + *in_num > max) 900 if (*out_num + *in_num > max)
748 errx(1, "Looped descriptor"); 901 bad_driver_vq(vq, "Looped descriptor");
749 } while ((i = next_desc(desc, i, max)) != max); 902 } while ((i = next_desc(vq->dev, desc, i, max)) != max);
750 903
751 return head; 904 return head;
752} 905}
@@ -803,7 +956,7 @@ static void console_input(struct virtqueue *vq)
803 /* Make sure there's a descriptor available. */ 956 /* Make sure there's a descriptor available. */
804 head = wait_for_vq_desc(vq, iov, &out_num, &in_num); 957 head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
805 if (out_num) 958 if (out_num)
806 errx(1, "Output buffers in console in queue?"); 959 bad_driver_vq(vq, "Output buffers in console in queue?");
807 960
808 /* Read into it. This is where we usually wait. */ 961 /* Read into it. This is where we usually wait. */
809 len = readv(STDIN_FILENO, iov, in_num); 962 len = readv(STDIN_FILENO, iov, in_num);
@@ -856,7 +1009,7 @@ static void console_output(struct virtqueue *vq)
856 /* We usually wait in here, for the Guest to give us something. */ 1009 /* We usually wait in here, for the Guest to give us something. */
857 head = wait_for_vq_desc(vq, iov, &out, &in); 1010 head = wait_for_vq_desc(vq, iov, &out, &in);
858 if (in) 1011 if (in)
859 errx(1, "Input buffers in console output queue?"); 1012 bad_driver_vq(vq, "Input buffers in console output queue?");
860 1013
861 /* writev can return a partial write, so we loop here. */ 1014 /* writev can return a partial write, so we loop here. */
862 while (!iov_empty(iov, out)) { 1015 while (!iov_empty(iov, out)) {
@@ -865,7 +1018,7 @@ static void console_output(struct virtqueue *vq)
865 warn("Write to stdout gave %i (%d)", len, errno); 1018 warn("Write to stdout gave %i (%d)", len, errno);
866 break; 1019 break;
867 } 1020 }
868 iov_consume(iov, out, NULL, len); 1021 iov_consume(vq->dev, iov, out, NULL, len);
869 } 1022 }
870 1023
871 /* 1024 /*
@@ -894,7 +1047,7 @@ static void net_output(struct virtqueue *vq)
894 /* We usually wait in here for the Guest to give us a packet. */ 1047 /* We usually wait in here for the Guest to give us a packet. */
895 head = wait_for_vq_desc(vq, iov, &out, &in); 1048 head = wait_for_vq_desc(vq, iov, &out, &in);
896 if (in) 1049 if (in)
897 errx(1, "Input buffers in net output queue?"); 1050 bad_driver_vq(vq, "Input buffers in net output queue?");
898 /* 1051 /*
899 * Send the whole thing through to /dev/net/tun. It expects the exact 1052 * Send the whole thing through to /dev/net/tun. It expects the exact
900 * same format: what a coincidence! 1053 * same format: what a coincidence!
@@ -942,7 +1095,7 @@ static void net_input(struct virtqueue *vq)
942 */ 1095 */
943 head = wait_for_vq_desc(vq, iov, &out, &in); 1096 head = wait_for_vq_desc(vq, iov, &out, &in);
944 if (out) 1097 if (out)
945 errx(1, "Output buffers in net input queue?"); 1098 bad_driver_vq(vq, "Output buffers in net input queue?");
946 1099
947 /* 1100 /*
948 * If it looks like we'll block reading from the tun device, send them 1101 * If it looks like we'll block reading from the tun device, send them
@@ -986,6 +1139,12 @@ static void kill_launcher(int signal)
986 kill(0, SIGTERM); 1139 kill(0, SIGTERM);
987} 1140}
988 1141
1142static void reset_vq_pci_config(struct virtqueue *vq)
1143{
1144 vq->pci_config.queue_size = VIRTQUEUE_NUM;
1145 vq->pci_config.queue_enable = 0;
1146}
1147
989static void reset_device(struct device *dev) 1148static void reset_device(struct device *dev)
990{ 1149{
991 struct virtqueue *vq; 1150 struct virtqueue *vq;
@@ -993,53 +1152,705 @@ static void reset_device(struct device *dev)
993 verbose("Resetting device %s\n", dev->name); 1152 verbose("Resetting device %s\n", dev->name);
994 1153
995 /* Clear any features they've acked. */ 1154 /* Clear any features they've acked. */
996 memset(get_feature_bits(dev) + dev->feature_len, 0, dev->feature_len); 1155 dev->features_accepted = 0;
997 1156
998 /* We're going to be explicitly killing threads, so ignore them. */ 1157 /* We're going to be explicitly killing threads, so ignore them. */
999 signal(SIGCHLD, SIG_IGN); 1158 signal(SIGCHLD, SIG_IGN);
1000 1159
1001 /* Zero out the virtqueues, get rid of their threads */ 1160 /*
1161 * 4.1.4.3.1:
1162 *
1163 * The device MUST present a 0 in queue_enable on reset.
1164 *
1165 * This means we set it here, and reset the saved ones in every vq.
1166 */
1167 dev->mmio->cfg.queue_enable = 0;
1168
1169 /* Get rid of the virtqueue threads */
1002 for (vq = dev->vq; vq; vq = vq->next) { 1170 for (vq = dev->vq; vq; vq = vq->next) {
1171 vq->last_avail_idx = 0;
1172 reset_vq_pci_config(vq);
1003 if (vq->thread != (pid_t)-1) { 1173 if (vq->thread != (pid_t)-1) {
1004 kill(vq->thread, SIGTERM); 1174 kill(vq->thread, SIGTERM);
1005 waitpid(vq->thread, NULL, 0); 1175 waitpid(vq->thread, NULL, 0);
1006 vq->thread = (pid_t)-1; 1176 vq->thread = (pid_t)-1;
1007 } 1177 }
1008 memset(vq->vring.desc, 0,
1009 vring_size(vq->config.num, LGUEST_VRING_ALIGN));
1010 lg_last_avail(vq) = 0;
1011 } 1178 }
1012 dev->running = false; 1179 dev->running = false;
1180 dev->wrote_features_ok = false;
1013 1181
1014 /* Now we care if threads die. */ 1182 /* Now we care if threads die. */
1015 signal(SIGCHLD, (void *)kill_launcher); 1183 signal(SIGCHLD, (void *)kill_launcher);
1016} 1184}
1017 1185
1186static void cleanup_devices(void)
1187{
1188 unsigned int i;
1189
1190 for (i = 1; i < MAX_PCI_DEVICES; i++) {
1191 struct device *d = devices.pci[i];
1192 if (!d)
1193 continue;
1194 reset_device(d);
1195 }
1196
1197 /* If we saved off the original terminal settings, restore them now. */
1198 if (orig_term.c_lflag & (ISIG|ICANON|ECHO))
1199 tcsetattr(STDIN_FILENO, TCSANOW, &orig_term);
1200}
1201
1202/*L:217
1203 * We do PCI. This is mainly done to let us test the kernel virtio PCI
1204 * code.
1205 */
1206
1207/* Linux expects a PCI host bridge: ours is a dummy, and first on the bus. */
1208static struct device pci_host_bridge;
1209
1210static void init_pci_host_bridge(void)
1211{
1212 pci_host_bridge.name = "PCI Host Bridge";
1213 pci_host_bridge.config.class = 0x06; /* bridge */
1214 pci_host_bridge.config.subclass = 0; /* host bridge */
1215 devices.pci[0] = &pci_host_bridge;
1216}
1217
1218/* The IO ports used to read the PCI config space. */
1219#define PCI_CONFIG_ADDR 0xCF8
1220#define PCI_CONFIG_DATA 0xCFC
1221
1222/*
1223 * Not really portable, but does help readability: this is what the Guest
1224 * writes to the PCI_CONFIG_ADDR IO port.
1225 */
1226union pci_config_addr {
1227 struct {
1228 unsigned mbz: 2;
1229 unsigned offset: 6;
1230 unsigned funcnum: 3;
1231 unsigned devnum: 5;
1232 unsigned busnum: 8;
1233 unsigned reserved: 7;
1234 unsigned enabled : 1;
1235 } bits;
1236 u32 val;
1237};
1238
1239/*
1240 * We cache what they wrote to the address port, so we know what they're
1241 * talking about when they access the data port.
1242 */
1243static union pci_config_addr pci_config_addr;
1244
1245static struct device *find_pci_device(unsigned int index)
1246{
1247 return devices.pci[index];
1248}
1249
1250/* PCI can do 1, 2 and 4 byte reads; we handle that here. */
1251static void ioread(u16 off, u32 v, u32 mask, u32 *val)
1252{
1253 assert(off < 4);
1254 assert(mask == 0xFF || mask == 0xFFFF || mask == 0xFFFFFFFF);
1255 *val = (v >> (off * 8)) & mask;
1256}
1257
1258/* PCI can do 1, 2 and 4 byte writes; we handle that here. */
1259static void iowrite(u16 off, u32 v, u32 mask, u32 *dst)
1260{
1261 assert(off < 4);
1262 assert(mask == 0xFF || mask == 0xFFFF || mask == 0xFFFFFFFF);
1263 *dst &= ~(mask << (off * 8));
1264 *dst |= (v & mask) << (off * 8);
1265}
1266
1267/*
1268 * Where a PCI_CONFIG_DATA access lands depends on the previous write to
1269 * PCI_CONFIG_ADDR.
1270 */
1271static struct device *dev_and_reg(u32 *reg)
1272{
1273 if (!pci_config_addr.bits.enabled)
1274 return NULL;
1275
1276 if (pci_config_addr.bits.funcnum != 0)
1277 return NULL;
1278
1279 if (pci_config_addr.bits.busnum != 0)
1280 return NULL;
1281
1282 if (pci_config_addr.bits.offset * 4 >= sizeof(struct pci_config))
1283 return NULL;
1284
1285 *reg = pci_config_addr.bits.offset;
1286 return find_pci_device(pci_config_addr.bits.devnum);
1287}
1288
1289/*
1290 * We can get invalid combinations of values while they're writing, so we
1291 * only fault if they try to write with some invalid bar/offset/length.
1292 */
1293static bool valid_bar_access(struct device *d,
1294 struct virtio_pci_cfg_cap *cfg_access)
1295{
1296 /* We only have 1 bar (BAR0) */
1297 if (cfg_access->cap.bar != 0)
1298 return false;
1299
1300 /* Check it's within BAR0. */
1301 if (cfg_access->cap.offset >= d->mmio_size
1302 || cfg_access->cap.offset + cfg_access->cap.length > d->mmio_size)
1303 return false;
1304
1305 /* Check length is 1, 2 or 4. */
1306 if (cfg_access->cap.length != 1
1307 && cfg_access->cap.length != 2
1308 && cfg_access->cap.length != 4)
1309 return false;
1310
1311 /*
1312 * 4.1.4.7.2:
1313 *
1314 * The driver MUST NOT write a cap.offset which is not a multiple of
1315 * cap.length (ie. all accesses MUST be aligned).
1316 */
1317 if (cfg_access->cap.offset % cfg_access->cap.length != 0)
1318 return false;
1319
1320 /* The requested access is valid. */
1321 return true;
1322}
1323
1324/* Is this accessing the PCI config address port? */
1325static bool is_pci_addr_port(u16 port)
1326{
1327 return port >= PCI_CONFIG_ADDR && port < PCI_CONFIG_ADDR + 4;
1328}
1329
1330static bool pci_addr_iowrite(u16 port, u32 mask, u32 val)
1331{
1332 iowrite(port - PCI_CONFIG_ADDR, val, mask,
1333 &pci_config_addr.val);
1334 verbose("PCI%s: %#x/%x: bus %u dev %u func %u reg %u\n",
1335 pci_config_addr.bits.enabled ? "" : " DISABLED",
1336 val, mask,
1337 pci_config_addr.bits.busnum,
1338 pci_config_addr.bits.devnum,
1339 pci_config_addr.bits.funcnum,
1340 pci_config_addr.bits.offset);
1341 return true;
1342}
1343
1344static void pci_addr_ioread(u16 port, u32 mask, u32 *val)
1345{
1346 ioread(port - PCI_CONFIG_ADDR, pci_config_addr.val, mask, val);
1347}
1348
1349/* Is this accessing the PCI config data port? */
1350static bool is_pci_data_port(u16 port)
1351{
1352 return port >= PCI_CONFIG_DATA && port < PCI_CONFIG_DATA + 4;
1353}
1354
1355static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask);
1356
1357static bool pci_data_iowrite(u16 port, u32 mask, u32 val)
1358{
1359 u32 reg, portoff;
1360 struct device *d = dev_and_reg(&reg);
1361
1362 /* Complain if they don't belong to a device. */
1363 if (!d)
1364 return false;
1365
1366 /* They can do 1 byte writes, etc. */
1367 portoff = port - PCI_CONFIG_DATA;
1368
1369 /*
1370 * PCI uses a weird way to determine the BAR size: the OS
1371 * writes all 1's, and sees which ones stick.
1372 */
1373 if (&d->config_words[reg] == &d->config.bar[0]) {
1374 int i;
1375
1376 iowrite(portoff, val, mask, &d->config.bar[0]);
1377 for (i = 0; (1 << i) < d->mmio_size; i++)
1378 d->config.bar[0] &= ~(1 << i);
1379 return true;
1380 } else if ((&d->config_words[reg] > &d->config.bar[0]
1381 && &d->config_words[reg] <= &d->config.bar[6])
1382 || &d->config_words[reg] == &d->config.expansion_rom_addr) {
1383 /* Allow writing to any other BAR, or expansion ROM */
1384 iowrite(portoff, val, mask, &d->config_words[reg]);
1385 return true;
1386 /* We let them override the latency timer and cacheline size */
1387 } else if (&d->config_words[reg] == (void *)&d->config.cacheline_size) {
1388 /* Only let them change the first two fields. */
1389 if (mask == 0xFFFFFFFF)
1390 mask = 0xFFFF;
1391 iowrite(portoff, val, mask, &d->config_words[reg]);
1392 return true;
1393 } else if (&d->config_words[reg] == (void *)&d->config.command
1394 && mask == 0xFFFF) {
1395 /* Ignore command writes. */
1396 return true;
1397 } else if (&d->config_words[reg]
1398 == (void *)&d->config.cfg_access.cap.bar
1399 || &d->config_words[reg]
1400 == &d->config.cfg_access.cap.length
1401 || &d->config_words[reg]
1402 == &d->config.cfg_access.cap.offset) {
1403
1404 /*
1405 * The VIRTIO_PCI_CAP_PCI_CFG capability
1406 * provides a backdoor to access the MMIO
1407 * regions without mapping them. Weird, but
1408 * useful.
1409 */
1410 iowrite(portoff, val, mask, &d->config_words[reg]);
1411 return true;
1412 } else if (&d->config_words[reg] == &d->config.cfg_access.pci_cfg_data) {
1413 u32 write_mask;
1414
1415 /*
1416 * 4.1.4.7.1:
1417 *
1418 * Upon detecting driver write access to pci_cfg_data, the
1419 * device MUST execute a write access at offset cap.offset at
1420 * BAR selected by cap.bar using the first cap.length bytes
1421 * from pci_cfg_data.
1422 */
1423
1424 /* Must be bar 0 */
1425 if (!valid_bar_access(d, &d->config.cfg_access))
1426 return false;
1427
1428 iowrite(portoff, val, mask, &d->config.cfg_access.pci_cfg_data);
1429
1430 /*
1431 * Now emulate a write. The mask we use is set by
1432 * len, *not* this write!
1433 */
1434 write_mask = (1ULL<<(8*d->config.cfg_access.cap.length)) - 1;
1435 verbose("Window writing %#x/%#x to bar %u, offset %u len %u\n",
1436 d->config.cfg_access.pci_cfg_data, write_mask,
1437 d->config.cfg_access.cap.bar,
1438 d->config.cfg_access.cap.offset,
1439 d->config.cfg_access.cap.length);
1440
1441 emulate_mmio_write(d, d->config.cfg_access.cap.offset,
1442 d->config.cfg_access.pci_cfg_data,
1443 write_mask);
1444 return true;
1445 }
1446
1447 /*
1448 * 4.1.4.1:
1449 *
1450 * The driver MUST NOT write into any field of the capability
1451 * structure, with the exception of those with cap_type
1452 * VIRTIO_PCI_CAP_PCI_CFG...
1453 */
1454 return false;
1455}
1456
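
The bar[0] branch above implements the classic BAR sizing handshake: the OS writes all ones, reads back, and the bits the device refused to store give the region size. A self-contained model of the driver's side (cfg_read()/cfg_write() are stand-ins for config-space accesses through ports 0xCF8/0xCFC; the stub emulates a 4 KB BAR):

#include <stdint.h>
#include <stdio.h>

/* The "device" refuses to store the low bits, like the loop above. */
static uint32_t bar0 = 0xFE000000;

static uint32_t cfg_read(void)       { return bar0; }
static void cfg_write(uint32_t v)    { bar0 = v & ~(uint32_t)0xFFF; }

int main(void)
{
	uint32_t saved = cfg_read();
	uint32_t size;

	cfg_write(0xFFFFFFFF);				/* write all ones */
	size = ~(cfg_read() & ~(uint32_t)0xF) + 1;	/* low 4 bits are flags */
	cfg_write(saved);				/* restore the address */

	printf("BAR0 size: %u bytes\n", (unsigned)size);	/* 4096 */
	return 0;
}
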
1457static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask);
1458
1459static void pci_data_ioread(u16 port, u32 mask, u32 *val)
1460{
1461 u32 reg;
1462 struct device *d = dev_and_reg(&reg);
1463
1464 if (!d)
1465 return;
1466
1467 /* Read through the PCI MMIO access window is special */
1468 if (&d->config_words[reg] == &d->config.cfg_access.pci_cfg_data) {
1469 u32 read_mask;
1470
1471 /*
1472 * 4.1.4.7.1:
1473 *
1474 * Upon detecting driver read access to pci_cfg_data, the
1475 * device MUST execute a read access of length cap.length at
1476 * offset cap.offset at BAR selected by cap.bar and store the
1477 * first cap.length bytes in pci_cfg_data.
1478 */
1479 /* Must be bar 0 */
1480 if (!valid_bar_access(d, &d->config.cfg_access))
1481 bad_driver(d,
1482 "Invalid cfg_access to bar%u, offset %u len %u",
1483 d->config.cfg_access.cap.bar,
1484 d->config.cfg_access.cap.offset,
1485 d->config.cfg_access.cap.length);
1486
1487 /*
1488 * Read into the window. The mask we use is set by
1489 * len, *not* this read!
1490 */
1491 read_mask = (1ULL<<(8*d->config.cfg_access.cap.length))-1;
1492 d->config.cfg_access.pci_cfg_data
1493 = emulate_mmio_read(d,
1494 d->config.cfg_access.cap.offset,
1495 read_mask);
1496 verbose("Window read %#x/%#x from bar %u, offset %u len %u\n",
1497 d->config.cfg_access.pci_cfg_data, read_mask,
1498 d->config.cfg_access.cap.bar,
1499 d->config.cfg_access.cap.offset,
1500 d->config.cfg_access.cap.length);
1501 }
1502 ioread(port - PCI_CONFIG_DATA, d->config_words[reg], mask, val);
1503}
1504
1018/*L:216 1505/*L:216
1019 * This actually creates the thread which services the virtqueue for a device. 1506 * This is where we emulate a handful of Guest instructions. It's ugly
1507 * and we used to do it in the kernel but it grew over time.
1508 */
1509
1510/*
1511 * We use the ptrace syscall's pt_regs struct to talk about registers
1512 * to lguest: these macros convert the names to the offsets.
1513 */
1514#define getreg(name) getreg_off(offsetof(struct user_regs_struct, name))
1515#define setreg(name, val) \
1516 setreg_off(offsetof(struct user_regs_struct, name), (val))
1517
1518static u32 getreg_off(size_t offset)
1519{
1520 u32 r;
1521 unsigned long args[] = { LHREQ_GETREG, offset };
1522
1523 if (pwrite(lguest_fd, args, sizeof(args), cpu_id) < 0)
1524 err(1, "Getting register %u", offset);
1525 if (pread(lguest_fd, &r, sizeof(r), cpu_id) != sizeof(r))
1526 err(1, "Reading register %u", offset);
1527
1528 return r;
1529}
1530
1531static void setreg_off(size_t offset, u32 val)
1532{
1533 unsigned long args[] = { LHREQ_SETREG, offset, val };
1534
1535 if (pwrite(lguest_fd, args, sizeof(args), cpu_id) < 0)
1536 err(1, "Setting register %u", offset);
1537}
1538
1539/* Get register by instruction encoding */
1540static u32 getreg_num(unsigned regnum, u32 mask)
1541{
1542 /* 8 bit ops use regnums 4-7 for high parts of word */
1543 if (mask == 0xFF && (regnum & 0x4))
1544 return getreg_num(regnum & 0x3, 0xFFFF) >> 8;
1545
1546 switch (regnum) {
1547 case 0: return getreg(eax) & mask;
1548 case 1: return getreg(ecx) & mask;
1549 case 2: return getreg(edx) & mask;
1550 case 3: return getreg(ebx) & mask;
1551 case 4: return getreg(esp) & mask;
1552 case 5: return getreg(ebp) & mask;
1553 case 6: return getreg(esi) & mask;
1554 case 7: return getreg(edi) & mask;
1555 }
1556 abort();
1557}
1558
1559/* Set register by instruction encoding */
1560static void setreg_num(unsigned regnum, u32 val, u32 mask)
1561{
1562 /* Don't try to set bits out of range */
1563 assert(!(val & ~mask));
1564
1565 /* 8 bit ops use regnums 4-7 for high parts of word */
1566 if (mask == 0xFF && (regnum & 0x4)) {
1567 /* Construct the 16 bits we want. */
1568 val = (val << 8) | getreg_num(regnum & 0x3, 0xFF);
1569 setreg_num(regnum & 0x3, val, 0xFFFF);
1570 return;
1571 }
1572
1573 switch (regnum) {
1574 case 0: setreg(eax, val | (getreg(eax) & ~mask)); return;
1575 case 1: setreg(ecx, val | (getreg(ecx) & ~mask)); return;
1576 case 2: setreg(edx, val | (getreg(edx) & ~mask)); return;
1577 case 3: setreg(ebx, val | (getreg(ebx) & ~mask)); return;
1578 case 4: setreg(esp, val | (getreg(esp) & ~mask)); return;
1579 case 5: setreg(ebp, val | (getreg(ebp) & ~mask)); return;
1580 case 6: setreg(esi, val | (getreg(esi) & ~mask)); return;
1581 case 7: setreg(edi, val | (getreg(edi) & ~mask)); return;
1582 }
1583 abort();
1584}
1585
1586/* Get bytes of displacement appended to instruction, from r/m encoding */
1587static u32 insn_displacement_len(u8 mod_reg_rm)
1588{
1589 /* Switch on the mod bits */
1590 switch (mod_reg_rm >> 6) {
1591 case 0:
1592 /* If mod == 0, and r/m == 101, 16-bit displacement follows */
1593 if ((mod_reg_rm & 0x7) == 0x5)
1594 return 2;
1595 /* Normally, mod == 0 means no literal displacement */
1596 return 0;
1597 case 1:
1598 /* One byte displacement */
1599 return 1;
1600 case 2:
1601 /* Four byte displacement */
1602 return 4;
1603 case 3:
1604 /* Register mode */
1605 return 0;
1606 }
1607 abort();
1608}
1609
1610static void emulate_insn(const u8 insn[])
1611{
1612 unsigned long args[] = { LHREQ_TRAP, 13 };
1613 unsigned int insnlen = 0, in = 0, small_operand = 0, byte_access;
1614 unsigned int eax, port, mask;
1615 /*
1616 * Default is to return all-ones on IO port reads, which traditionally
1617 * means "there's nothing there".
1618 */
1619 u32 val = 0xFFFFFFFF;
1620
1621 /*
1622 * This must be the Guest kernel trying to do something, not userspace!
1623 * The bottom two bits of the CS segment register are the privilege
1624 * level.
1625 */
1626 if ((getreg(xcs) & 3) != 0x1)
1627 goto no_emulate;
1628
1629 /* Decoding x86 instructions is icky. */
1630
1631 /*
1632 * Around 2.6.33, the kernel started using an emulation for the
1633 * cmpxchg8b instruction in early boot on many configurations. This
1634 * code isn't paravirtualized, and it tries to disable interrupts.
1635 * Ignore it, which will Mostly Work.
1636 */
1637 if (insn[insnlen] == 0xfa) {
1638 /* "cli", or Clear Interrupt Enable instruction. Skip it. */
1639 insnlen = 1;
1640 goto skip_insn;
1641 }
1642
1643 /*
1644 * 0x66 is an "operand prefix": it means a 16-bit, not 32-bit, in/out.
1645 */
1646 if (insn[insnlen] == 0x66) {
1647 small_operand = 1;
1648 /* The instruction is 1 byte so far, read the next byte. */
1649 insnlen = 1;
1650 }
1651
1652 /* If the lower bit isn't set, it's a single byte access */
1653 byte_access = !(insn[insnlen] & 1);
1654
1655 /*
1656 * Now we can ignore the lower bit and decode the 4 opcodes
1657 * we need to emulate.
1658 */
1659 switch (insn[insnlen] & 0xFE) {
1660 case 0xE4: /* in <next byte>,%al */
1661 port = insn[insnlen+1];
1662 insnlen += 2;
1663 in = 1;
1664 break;
1665 case 0xEC: /* in (%dx),%al */
1666 port = getreg(edx) & 0xFFFF;
1667 insnlen += 1;
1668 in = 1;
1669 break;
1670 case 0xE6: /* out %al,<next byte> */
1671 port = insn[insnlen+1];
1672 insnlen += 2;
1673 break;
1674 case 0xEE: /* out %al,(%dx) */
1675 port = getreg(edx) & 0xFFFF;
1676 insnlen += 1;
1677 break;
1678 default:
1679 /* OK, we don't know what this is, can't emulate. */
1680 goto no_emulate;
1681 }
1682
1683 /* Set a mask of the 1, 2 or 4 bytes, depending on size of IO */
1684 if (byte_access)
1685 mask = 0xFF;
1686 else if (small_operand)
1687 mask = 0xFFFF;
1688 else
1689 mask = 0xFFFFFFFF;
1690
1691 /*
1692 * If it was an "IN" instruction, they expect the result to be read
1693 * into %eax, so we change %eax.
1694 */
1695 eax = getreg(eax);
1696
1697 if (in) {
1698 /* This is the PS/2 keyboard status; 1 means ready for output */
1699 if (port == 0x64)
1700 val = 1;
1701 else if (is_pci_addr_port(port))
1702 pci_addr_ioread(port, mask, &val);
1703 else if (is_pci_data_port(port))
1704 pci_data_ioread(port, mask, &val);
1705
1706 /* Clear the bits we're about to read */
1707 eax &= ~mask;
1708 /* Copy bits in from val. */
1709 eax |= val & mask;
1710 /* Now update the register. */
1711 setreg(eax, eax);
1712 } else {
1713 if (is_pci_addr_port(port)) {
1714 if (!pci_addr_iowrite(port, mask, eax))
1715 goto bad_io;
1716 } else if (is_pci_data_port(port)) {
1717 if (!pci_data_iowrite(port, mask, eax))
1718 goto bad_io;
1719 }
1720 /* There are many other ports, eg. CMOS clock, serial
1721 * and parallel ports, so we ignore them all. */
1722 }
1723
1724 verbose("IO %s of %x to %u: %#08x\n",
1725 in ? "IN" : "OUT", mask, port, eax);
1726skip_insn:
1727 /* Finally, we've "done" the instruction, so move past it. */
1728 setreg(eip, getreg(eip) + insnlen);
1729 return;
1730
1731bad_io:
1732 warnx("Attempt to %s port %u (%#x mask)",
1733 in ? "read from" : "write to", port, mask);
1734
1735no_emulate:
1736 /* Inject trap into Guest. */
1737 if (write(lguest_fd, args, sizeof(args)) < 0)
1738 err(1, "Reinjecting trap 13 for fault at %#x", getreg(eip));
1739}
1740
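
emulate_insn() only needs to recognize the four IN/OUT encodings (plus the 0x66 operand-size prefix and the cli special case). A self-checking sketch of that decode table, using the same opcode bytes as the switch above:

#include <assert.h>
#include <stdint.h>

/*
 * Mirror of the opcode switch in emulate_insn(): 1 for IN, 0 for OUT,
 * -1 for anything it cannot emulate (which gets trap 13 reinjected).
 */
static int decode_io(uint8_t opcode)
{
	switch (opcode & 0xFE) {
	case 0xE4: return 1;	/* in  <imm8>,%al */
	case 0xEC: return 1;	/* in  (%dx),%al  */
	case 0xE6: return 0;	/* out %al,<imm8> */
	case 0xEE: return 0;	/* out %al,(%dx)  */
	}
	return -1;
}

int main(void)
{
	assert(decode_io(0xEC) == 1);	/* inb (%dx) */
	assert(decode_io(0xED) == 1);	/* inl (%dx): low bit set = word/long */
	assert(decode_io(0xEE) == 0);	/* outb (%dx) */
	assert(decode_io(0x90) == -1);	/* nop: not emulated */
	return 0;
}
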
1741static struct device *find_mmio_region(unsigned long paddr, u32 *off)
1742{
1743 unsigned int i;
1744
1745 for (i = 1; i < MAX_PCI_DEVICES; i++) {
1746 struct device *d = devices.pci[i];
1747
1748 if (!d)
1749 continue;
1750 if (paddr < d->mmio_addr)
1751 continue;
1752 if (paddr >= d->mmio_addr + d->mmio_size)
1753 continue;
1754 *off = paddr - d->mmio_addr;
1755 return d;
1756 }
1757 return NULL;
1758}
1759
1760/* FIXME: Use vq array. */
1761static struct virtqueue *vq_by_num(struct device *d, u32 num)
1762{
1763 struct virtqueue *vq = d->vq;
1764
1765 while (num-- && vq)
1766 vq = vq->next;
1767
1768 return vq;
1769}
1770
1771static void save_vq_config(const struct virtio_pci_common_cfg *cfg,
1772 struct virtqueue *vq)
1773{
1774 vq->pci_config = *cfg;
1775}
1776
1777static void restore_vq_config(struct virtio_pci_common_cfg *cfg,
1778 struct virtqueue *vq)
1779{
1780 /* Only restore the per-vq part */
1781 size_t off = offsetof(struct virtio_pci_common_cfg, queue_size);
1782
1783 memcpy((void *)cfg + off, (void *)&vq->pci_config + off,
1784 sizeof(*cfg) - off);
1785}
1786
1787/*
1788 * 4.1.4.3.2:
1789 *
1790 * The driver MUST configure the other virtqueue fields before
1791 * enabling the virtqueue with queue_enable.
1792 *
1793 * When they enable the virtqueue, we check that their setup is valid.
1020 */ 1794 */
1021static void create_thread(struct virtqueue *vq) 1795static void check_virtqueue(struct device *d, struct virtqueue *vq)
1796{
1797 /* Because lguest is 32 bit, all the descriptor high bits must be 0 */
1798 if (vq->pci_config.queue_desc_hi
1799 || vq->pci_config.queue_avail_hi
1800 || vq->pci_config.queue_used_hi)
1801 bad_driver_vq(vq, "invalid 64-bit queue address");
1802
1803 /*
1804 * 2.4.1:
1805 *
1806 * The driver MUST ensure that the physical address of the first byte
1807 * of each virtqueue part is a multiple of the specified alignment
1808 * value in the above table.
1809 */
1810 if (vq->pci_config.queue_desc_lo % 16
1811 || vq->pci_config.queue_avail_lo % 2
1812 || vq->pci_config.queue_used_lo % 4)
1813 bad_driver_vq(vq, "invalid alignment in queue addresses");
1814
1815 /* Initialize the virtqueue and check they're all in range. */
1816 vq->vring.num = vq->pci_config.queue_size;
1817 vq->vring.desc = check_pointer(vq->dev,
1818 vq->pci_config.queue_desc_lo,
1819 sizeof(*vq->vring.desc) * vq->vring.num);
1820 vq->vring.avail = check_pointer(vq->dev,
1821 vq->pci_config.queue_avail_lo,
1822 sizeof(*vq->vring.avail)
1823 + (sizeof(vq->vring.avail->ring[0])
1824 * vq->vring.num));
1825 vq->vring.used = check_pointer(vq->dev,
1826 vq->pci_config.queue_used_lo,
1827 sizeof(*vq->vring.used)
1828 + (sizeof(vq->vring.used->ring[0])
1829 * vq->vring.num));
1830
1831 /*
1832 * 2.4.9.1:
1833 *
1834 * The driver MUST initialize flags in the used ring to 0
1835 * when allocating the used ring.
1836 */
1837 if (vq->vring.used->flags != 0)
1838 bad_driver_vq(vq, "invalid initial used.flags %#x",
1839 vq->vring.used->flags);
1840}
1841
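
The three check_pointer() calls in check_virtqueue() size the ring parts exactly as the split-ring layout dictates. A standalone check of the arithmetic for a 64-entry queue, using local copies of the classic vring structures (no event-index fields):

#include <assert.h>
#include <stdint.h>

/* Classic split-ring parts, as sized by the check_pointer() calls above. */
struct vring_desc	{ uint64_t addr; uint32_t len; uint16_t flags, next; };
struct vring_avail	{ uint16_t flags; uint16_t idx; uint16_t ring[]; };
struct vring_used_elem	{ uint32_t id; uint32_t len; };
struct vring_used	{ uint16_t flags; uint16_t idx; struct vring_used_elem ring[]; };

int main(void)
{
	unsigned num = 64;	/* vq->pci_config.queue_size */

	assert(sizeof(struct vring_desc) * num == 1024);
	assert(sizeof(struct vring_avail) + sizeof(uint16_t) * num == 132);
	assert(sizeof(struct vring_used)
	       + sizeof(struct vring_used_elem) * num == 516);
	return 0;
}
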
1842static void start_virtqueue(struct virtqueue *vq)
1022{ 1843{
1023 /* 1844 /*
1024 * Create stack for thread. Since the stack grows upwards, we point 1845 * Create stack for thread. Since the stack grows upwards, we point
1025 * the stack pointer to the end of this region. 1846 * the stack pointer to the end of this region.
1026 */ 1847 */
1027 char *stack = malloc(32768); 1848 char *stack = malloc(32768);
1028 unsigned long args[] = { LHREQ_EVENTFD,
1029 vq->config.pfn*getpagesize(), 0 };
1030 1849
1031 /* Create a zero-initialized eventfd. */ 1850 /* Create a zero-initialized eventfd. */
1032 vq->eventfd = eventfd(0, 0); 1851 vq->eventfd = eventfd(0, 0);
1033 if (vq->eventfd < 0) 1852 if (vq->eventfd < 0)
1034 err(1, "Creating eventfd"); 1853 err(1, "Creating eventfd");
1035 args[2] = vq->eventfd;
1036
1037 /*
1038 * Attach an eventfd to this virtqueue: it will go off when the Guest
1039 * does an LHCALL_NOTIFY for this vq.
1040 */
1041 if (write(lguest_fd, &args, sizeof(args)) != 0)
1042 err(1, "Attaching eventfd");
1043 1854
1044 /* 1855 /*
1045 * CLONE_VM: because it has to access the Guest memory, and SIGCHLD so 1856 * CLONE_VM: because it has to access the Guest memory, and SIGCHLD so
@@ -1048,167 +1859,531 @@ static void create_thread(struct virtqueue *vq)
1048 vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq); 1859 vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq);
1049 if (vq->thread == (pid_t)-1) 1860 if (vq->thread == (pid_t)-1)
1050 err(1, "Creating clone"); 1861 err(1, "Creating clone");
1051
1052 /* We close our local copy now the child has it. */
1053 close(vq->eventfd);
1054} 1862}
1055 1863
1056static void start_device(struct device *dev) 1864static void start_virtqueues(struct device *d)
1057{ 1865{
1058 unsigned int i;
1059 struct virtqueue *vq; 1866 struct virtqueue *vq;
1060 1867
1061 verbose("Device %s OK: offered", dev->name); 1868 for (vq = d->vq; vq; vq = vq->next) {
1062 for (i = 0; i < dev->feature_len; i++) 1869 if (vq->pci_config.queue_enable)
1063 verbose(" %02x", get_feature_bits(dev)[i]); 1870 start_virtqueue(vq);
1064 verbose(", accepted");
1065 for (i = 0; i < dev->feature_len; i++)
1066 verbose(" %02x", get_feature_bits(dev)
1067 [dev->feature_len+i]);
1068
1069 for (vq = dev->vq; vq; vq = vq->next) {
1070 if (vq->service)
1071 create_thread(vq);
1072 } 1871 }
1073 dev->running = true;
1074} 1872}
1075 1873
1076static void cleanup_devices(void) 1874static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
1077{ 1875{
1078 struct device *dev; 1876 struct virtqueue *vq;
1079 1877
1080 for (dev = devices.dev; dev; dev = dev->next) 1878 switch (off) {
1081 reset_device(dev); 1879 case offsetof(struct virtio_pci_mmio, cfg.device_feature_select):
1880 /*
1881 * 4.1.4.3.1:
1882 *
1883 * The device MUST present the feature bits it is offering in
1884 * device_feature, starting at bit device_feature_select ∗ 32
1885 * for any device_feature_select written by the driver
1886 */
1887 if (val == 0)
1888 d->mmio->cfg.device_feature = d->features;
1889 else if (val == 1)
1890 d->mmio->cfg.device_feature = (d->features >> 32);
1891 else
1892 d->mmio->cfg.device_feature = 0;
1893 goto feature_write_through32;
1894 case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select):
1895 if (val > 1)
1896 bad_driver(d, "Unexpected driver select %u", val);
1897 goto feature_write_through32;
1898 case offsetof(struct virtio_pci_mmio, cfg.guest_feature):
1899 if (d->mmio->cfg.guest_feature_select == 0) {
1900 d->features_accepted &= ~((u64)0xFFFFFFFF);
1901 d->features_accepted |= val;
1902 } else {
1903 assert(d->mmio->cfg.guest_feature_select == 1);
1904 d->features_accepted &= 0xFFFFFFFF;
1905 d->features_accepted |= ((u64)val) << 32;
1906 }
1907 /*
1908 * 2.2.1:
1909 *
1910 * The driver MUST NOT accept a feature which the device did
1911 * not offer
1912 */
1913 if (d->features_accepted & ~d->features)
1914 bad_driver(d, "over-accepted features %#llx of %#llx",
1915 d->features_accepted, d->features);
1916 goto feature_write_through32;
1917 case offsetof(struct virtio_pci_mmio, cfg.device_status): {
1918 u8 prev;
1919
1920 verbose("%s: device status -> %#x\n", d->name, val);
1921 /*
1922 * 4.1.4.3.1:
1923 *
1924 * The device MUST reset when 0 is written to device_status,
1925 * and present a 0 in device_status once that is done.
1926 */
1927 if (val == 0) {
1928 reset_device(d);
1929 goto write_through8;
1930 }
1082 1931
1083 /* If we saved off the original terminal settings, restore them now. */ 1932 /* 2.1.1: The driver MUST NOT clear a device status bit. */
1084 if (orig_term.c_lflag & (ISIG|ICANON|ECHO)) 1933 if (d->mmio->cfg.device_status & ~val)
1085 tcsetattr(STDIN_FILENO, TCSANOW, &orig_term); 1934 bad_driver(d, "unset of device status bit %#x -> %#x",
1086} 1935 d->mmio->cfg.device_status, val);
1087 1936
1088/* When the Guest tells us they updated the status field, we handle it. */ 1937 /*
1089static void update_device_status(struct device *dev) 1938 * 2.1.2:
1090{ 1939 *
1091 /* A zero status is a reset, otherwise it's a set of flags. */ 1940 * The device MUST NOT consume buffers or notify the driver
1092 if (dev->desc->status == 0) 1941 * before DRIVER_OK.
1093 reset_device(dev); 1942 */
1094 else if (dev->desc->status & VIRTIO_CONFIG_S_FAILED) { 1943 if (val & VIRTIO_CONFIG_S_DRIVER_OK
1095 warnx("Device %s configuration FAILED", dev->name); 1944 && !(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK))
1096 if (dev->running) 1945 start_virtqueues(d);
1097 reset_device(dev); 1946
1098 } else { 1947 /*
1099 if (dev->running) 1948 * 3.1.1:
1100 err(1, "Device %s features finalized twice", dev->name); 1949 *
1101 start_device(dev); 1950 * The driver MUST follow this sequence to initialize a device:
1951 * - Reset the device.
1952 * - Set the ACKNOWLEDGE status bit: the guest OS has
1953 * notice the device.
1954 * - Set the DRIVER status bit: the guest OS knows how
1955 * to drive the device.
1956 * - Read device feature bits, and write the subset
1957 * of feature bits understood by the OS and driver
1958 * to the device. During this step the driver MAY
1959 * read (but MUST NOT write) the device-specific
1960 * configuration fields to check that it can
1961 * support the device before accepting it.
1962 * - Set the FEATURES_OK status bit. The driver
1963 * MUST not accept new feature bits after this
1964 * step.
1965 * - Re-read device status to ensure the FEATURES_OK
1966 * bit is still set: otherwise, the device does
1967 * not support our subset of features and the
1968 * device is unusable.
1969 * - Perform device-specific setup, including
1970 * discovery of virtqueues for the device,
1971 * optional per-bus setup, reading and possibly
1972 * writing the device’s virtio configuration
1973 * space, and population of virtqueues.
1974 * - Set the DRIVER_OK status bit. At this point the
1975 * device is “live”.
1976 */
1977 prev = 0;
1978 switch (val & ~d->mmio->cfg.device_status) {
1979 case VIRTIO_CONFIG_S_DRIVER_OK:
1980 prev |= VIRTIO_CONFIG_S_FEATURES_OK; /* fall thru */
1981 case VIRTIO_CONFIG_S_FEATURES_OK:
1982 prev |= VIRTIO_CONFIG_S_DRIVER; /* fall thru */
1983 case VIRTIO_CONFIG_S_DRIVER:
1984 prev |= VIRTIO_CONFIG_S_ACKNOWLEDGE; /* fall thru */
1985 case VIRTIO_CONFIG_S_ACKNOWLEDGE:
1986 break;
1987 default:
1988 bad_driver(d, "unknown device status bit %#x -> %#x",
1989 d->mmio->cfg.device_status, val);
1990 }
1991 if (d->mmio->cfg.device_status != prev)
1992 bad_driver(d, "unexpected status transition %#x -> %#x",
1993 d->mmio->cfg.device_status, val);
1994
1995		/* If they just wrote FEATURES_OK, make sure they re-read it before DRIVER_OK. */
1996 switch (val & ~d->mmio->cfg.device_status) {
1997 case VIRTIO_CONFIG_S_FEATURES_OK:
1998 d->wrote_features_ok = true;
1999 break;
2000 case VIRTIO_CONFIG_S_DRIVER_OK:
2001 if (d->wrote_features_ok)
2002 bad_driver(d, "did not re-read FEATURES_OK");
2003 break;
2004 }
2005 goto write_through8;
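
The prev computation above runs the status ladder backwards: starting from the bit being added, each fall-through ORs in the bit that must already be latched, so the write is accepted only when the current status equals exactly that prefix. The same rule written as an explicit table (a standalone sketch using the standard VIRTIO_CONFIG_S_* values):

#include <assert.h>
#include <stdint.h>

#define ACKNOWLEDGE	1
#define DRIVER		2
#define DRIVER_OK	4
#define FEATURES_OK	8

/* Status bits that must already be latched before 'bit' may be added. */
static uint8_t required_before(uint8_t bit)
{
	switch (bit) {
	case ACKNOWLEDGE: return 0;
	case DRIVER:	  return ACKNOWLEDGE;
	case FEATURES_OK: return ACKNOWLEDGE | DRIVER;
	case DRIVER_OK:	  return ACKNOWLEDGE | DRIVER | FEATURES_OK;
	default:	  return 0xFF;	/* never matches: reject */
	}
}

int main(void)
{
	uint8_t status = ACKNOWLEDGE | DRIVER;

	assert(required_before(FEATURES_OK) == status);	/* legal next step */
	assert(required_before(DRIVER_OK) != status);	/* FEATURES_OK missing */
	return 0;
}
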
1102	}
1103	}
2006	}
2007	case offsetof(struct virtio_pci_mmio, cfg.queue_select):
2008 vq = vq_by_num(d, val);
2009 /*
2010 * 4.1.4.3.1:
2011 *
2012 * The device MUST present a 0 in queue_size if the virtqueue
2013 * corresponding to the current queue_select is unavailable.
2014 */
2015 if (!vq) {
2016 d->mmio->cfg.queue_size = 0;
2017 goto write_through16;
2018 }
2019 /* Save registers for old vq, if it was a valid vq */
2020 if (d->mmio->cfg.queue_size)
2021 save_vq_config(&d->mmio->cfg,
2022 vq_by_num(d, d->mmio->cfg.queue_select));
2023 /* Restore the registers for the queue they asked for */
2024 restore_vq_config(&d->mmio->cfg, vq);
2025 goto write_through16;
2026 case offsetof(struct virtio_pci_mmio, cfg.queue_size):
2027 /*
2028 * 4.1.4.3.2:
2029 *
2030 * The driver MUST NOT write a value which is not a power of 2
2031 * to queue_size.
2032 */
2033 if (val & (val-1))
2034 bad_driver(d, "invalid queue size %u", val);
2035 if (d->mmio->cfg.queue_enable)
2036 bad_driver(d, "changing queue size on live device");
2037 goto write_through16;
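
The val & (val-1) idiom works because subtracting one flips the lowest set bit and everything below it, so the AND is zero exactly when at most one bit is set (note that 0 passes the test too). A self-contained check:

#include <assert.h>
#include <stdint.h>

static int is_pow2_or_zero(uint32_t val)
{
	return (val & (val - 1)) == 0;
}

int main(void)
{
	assert(is_pow2_or_zero(0));	/* 0 - 1 wraps to all-ones; 0 & ~0 == 0 */
	assert(is_pow2_or_zero(256));
	assert(!is_pow2_or_zero(768));	/* two bits set: 512 + 256 */
	return 0;
}
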
2038 case offsetof(struct virtio_pci_mmio, cfg.queue_msix_vector):
2039 bad_driver(d, "attempt to set MSIX vector to %u", val);
2040 case offsetof(struct virtio_pci_mmio, cfg.queue_enable): {
2041 struct virtqueue *vq = vq_by_num(d, d->mmio->cfg.queue_select);
1104
1105	/*L:215
1106	 * This is the generic routine we call when the Guest uses LHCALL_NOTIFY. In
1107	 * particular, it's used to notify us of device status changes during boot.
1108	 */
1109	static void handle_output(unsigned long addr)
1110	{
1111		struct device *i;
1112
1113		/* Check each device. */
1114		for (i = devices.dev; i; i = i->next) {
1115			struct virtqueue *vq;
2042
2043		/*
2044		 * 4.1.4.3.2:
2045		 *
2046		 * The driver MUST NOT write a 0 to queue_enable.
2047		 */
2048		if (val != 1)
2049			bad_driver(d, "setting queue_enable to %u", val);
2050
2051		/*
2052		 * 3.1.1:
2053		 *
2054 * 7. Perform device-specific setup, including discovery of
2055 * virtqueues for the device, optional per-bus setup,
2056 * reading and possibly writing the device’s virtio
2057 * configuration space, and population of virtqueues.
2058 * 8. Set the DRIVER_OK status bit.
2059 *
2060 * All our devices require all virtqueues to be enabled, so
2061 * they should have done that before setting DRIVER_OK.
2062 */
2063 if (d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK)
2064 bad_driver(d, "enabling vq after DRIVER_OK");
1116 2065
2066 d->mmio->cfg.queue_enable = val;
2067 save_vq_config(&d->mmio->cfg, vq);
2068 check_virtqueue(d, vq);
2069 goto write_through16;
2070 }
2071 case offsetof(struct virtio_pci_mmio, cfg.queue_notify_off):
2072 bad_driver(d, "attempt to write to queue_notify_off");
2073 case offsetof(struct virtio_pci_mmio, cfg.queue_desc_lo):
2074 case offsetof(struct virtio_pci_mmio, cfg.queue_desc_hi):
2075 case offsetof(struct virtio_pci_mmio, cfg.queue_avail_lo):
2076 case offsetof(struct virtio_pci_mmio, cfg.queue_avail_hi):
2077 case offsetof(struct virtio_pci_mmio, cfg.queue_used_lo):
2078 case offsetof(struct virtio_pci_mmio, cfg.queue_used_hi):
1117		/*
1118		 * Notifications to device descriptors mean they updated the
1119		 * device status.
1120		 */
1121		if (from_guest_phys(addr) == i->desc) {
1122			update_device_status(i);
1123			return;
1124		}
2079		/*
2080		 * 4.1.4.3.2:
2081		 *
2082		 * The driver MUST configure the other virtqueue fields before
2083		 * enabling the virtqueue with queue_enable.
2084		 */
2085		if (d->mmio->cfg.queue_enable)
2086			bad_driver(d, "changing queue on live device");
2087
2088		/*
2089 * 3.1.1:
2090 *
2091 * The driver MUST follow this sequence to initialize a device:
2092 *...
2093		 * 5. Set the FEATURES_OK status bit. The driver MUST NOT
2094 * accept new feature bits after this step.
2095 */
2096 if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_FEATURES_OK))
2097 bad_driver(d, "setting up vq before FEATURES_OK");
1125
1126		/* Devices should not be used before features are finalized. */
1127		for (vq = i->vq; vq; vq = vq->next) {
1128			if (addr != vq->config.pfn*getpagesize())
1129				continue;
1130			errx(1, "Notification on %s before setup!", i->name);
2098
2099		/*
2100		 * 6. Re-read device status to ensure the FEATURES_OK bit is
2101		 *    still set...
2102		 */
2103		if (d->wrote_features_ok)
2104 bad_driver(d, "didn't re-read FEATURES_OK before setup");
2105
2106 goto write_through32;
2107 case offsetof(struct virtio_pci_mmio, notify):
2108 vq = vq_by_num(d, val);
2109 if (!vq)
2110 bad_driver(d, "Invalid vq notification on %u", val);
2111 /* Notify the process handling this vq by adding 1 to eventfd */
2112 write(vq->eventfd, "\1\0\0\0\0\0\0\0", 8);
2113 goto write_through16;
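
The notify path is just an eventfd: the eight-byte little-endian 1 written here adds to the counter and wakes whichever service thread is blocked reading it. A standalone demonstration of that handshake (not from this patch):

#include <err.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	int efd = eventfd(0, 0);
	uint64_t kick = 1, total;

	if (efd < 0)
		err(1, "eventfd");
	/* The kick: equivalent to the "\1\0\0\0\0\0\0\0" write above. */
	if (write(efd, &kick, sizeof(kick)) != sizeof(kick))
		err(1, "kick");
	/* The service thread's read returns and resets the counter. */
	if (read(efd, &total, sizeof(total)) != sizeof(total))
		err(1, "read");
	return total == 1 ? 0 : 1;
}
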
2114 case offsetof(struct virtio_pci_mmio, isr):
2115 bad_driver(d, "Unexpected write to isr");
2116 /* Weird corner case: write to emerg_wr of console */
2117 case sizeof(struct virtio_pci_mmio)
2118 + offsetof(struct virtio_console_config, emerg_wr):
2119 if (strcmp(d->name, "console") == 0) {
2120 char c = val;
2121 write(STDOUT_FILENO, &c, 1);
2122 goto write_through32;
1131 } 2123 }
2124 /* Fall through... */
2125 default:
2126 /*
2127 * 4.1.4.3.2:
2128 *
2129 * The driver MUST NOT write to device_feature, num_queues,
2130 * config_generation or queue_notify_off.
2131 */
2132 bad_driver(d, "Unexpected write to offset %u", off);
1132 } 2133 }
1133 2134
2135feature_write_through32:
1134	/*
1135	 * Early console write is done using notify on a nul-terminated string
1136	 * in Guest memory.  It's also great for hacking debugging messages
1137	 * into a Guest.
2136	/*
2137	 * 3.1.1:
2138	 *
2139	 * The driver MUST follow this sequence to initialize a device:
2140 *...
2141 * - Set the DRIVER status bit: the guest OS knows how
2142 * to drive the device.
2143 * - Read device feature bits, and write the subset
2144 * of feature bits understood by the OS and driver
2145 * to the device.
2146 *...
2147	 *   - Set the FEATURES_OK status bit. The driver MUST NOT
2148 * accept new feature bits after this step.
1138	 */
1139	if (addr >= guest_limit)
1140		errx(1, "Bad NOTIFY %#lx", addr);
2149	 */
2150	if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER))
2151		bad_driver(d, "feature write before VIRTIO_CONFIG_S_DRIVER");
2152 if (d->mmio->cfg.device_status & VIRTIO_CONFIG_S_FEATURES_OK)
2153 bad_driver(d, "feature write after VIRTIO_CONFIG_S_FEATURES_OK");
1141
1142	write(STDOUT_FILENO, from_guest_phys(addr),
1143	      strnlen(from_guest_phys(addr), guest_limit - addr));
2154
2155	/*
2156	 * 4.1.3.1:
2157 *
2158 * The driver MUST access each field using the “natural” access
2159 * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses for
2160 * 16-bit fields and 8-bit accesses for 8-bit fields.
2161 */
2162write_through32:
2163 if (mask != 0xFFFFFFFF) {
2164 bad_driver(d, "non-32-bit write to offset %u (%#x)",
2165 off, getreg(eip));
2166 return;
2167 }
2168 memcpy((char *)d->mmio + off, &val, 4);
2169 return;
2170
2171write_through16:
2172 if (mask != 0xFFFF)
2173 bad_driver(d, "non-16-bit write to offset %u (%#x)",
2174 off, getreg(eip));
2175 memcpy((char *)d->mmio + off, &val, 2);
2176 return;
2177
2178write_through8:
2179 if (mask != 0xFF)
2180 bad_driver(d, "non-8-bit write to offset %u (%#x)",
2181 off, getreg(eip));
2182 memcpy((char *)d->mmio + off, &val, 1);
2183 return;
1144	}
1145
1146	/*L:190
1147	 * Device Setup
1148	 *
1149	 * All devices need a descriptor so the Guest knows it exists, and a "struct
1150	 * device" so the Launcher can keep track of it.  We have common helper
1151	 * routines to allocate and manage them.
1152	 */
1153
1154	/*
1155	 * The layout of the device page is a "struct lguest_device_desc" followed by a
1156	 * number of virtqueue descriptors, then two sets of feature bits, then an
1157	 * array of configuration bytes.  This routine returns the configuration
1158	 * pointer.
1159	 */
1160	static u8 *device_config(const struct device *dev)
1161	{
1162		return (void *)(dev->desc + 1)
1163			+ dev->num_vq * sizeof(struct lguest_vqconfig)
1164			+ dev->feature_len * 2;
2184	}
2185
2186	static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask)
2187	{
2188		u8 isr;
2189		u32 val = 0;
2190
2191 switch (off) {
2192 case offsetof(struct virtio_pci_mmio, cfg.device_feature_select):
2193 case offsetof(struct virtio_pci_mmio, cfg.device_feature):
2194 case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select):
2195 case offsetof(struct virtio_pci_mmio, cfg.guest_feature):
2196 /*
2197 * 3.1.1:
2198 *
2199 * The driver MUST follow this sequence to initialize a device:
2200 *...
2201 * - Set the DRIVER status bit: the guest OS knows how
2202 * to drive the device.
2203 * - Read device feature bits, and write the subset
2204 * of feature bits understood by the OS and driver
2205 * to the device.
2206 */
2207 if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER))
2208 bad_driver(d,
2209 "feature read before VIRTIO_CONFIG_S_DRIVER");
2210 goto read_through32;
2211 case offsetof(struct virtio_pci_mmio, cfg.msix_config):
2212 bad_driver(d, "read of msix_config");
2213 case offsetof(struct virtio_pci_mmio, cfg.num_queues):
2214 goto read_through16;
2215 case offsetof(struct virtio_pci_mmio, cfg.device_status):
2216 /* As they did read, any write of FEATURES_OK is now fine. */
2217 d->wrote_features_ok = false;
2218 goto read_through8;
2219 case offsetof(struct virtio_pci_mmio, cfg.config_generation):
2220 /*
2221 * 4.1.4.3.1:
2222 *
2223 * The device MUST present a changed config_generation after
2224 * the driver has read a device-specific configuration value
2225 * which has changed since any part of the device-specific
2226 * configuration was last read.
2227 *
2228 * This is simple: none of our devices change config, so this
2229 * is always 0.
2230 */
2231 goto read_through8;
2232 case offsetof(struct virtio_pci_mmio, notify):
2233 /*
2234 * 3.1.1:
2235 *
2236 * The driver MUST NOT notify the device before setting
2237 * DRIVER_OK.
2238 */
2239 if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK))
2240 bad_driver(d, "notify before VIRTIO_CONFIG_S_DRIVER_OK");
2241 goto read_through16;
2242 case offsetof(struct virtio_pci_mmio, isr):
2243 if (mask != 0xFF)
2244 bad_driver(d, "non-8-bit read from offset %u (%#x)",
2245 off, getreg(eip));
2246 isr = d->mmio->isr;
2247 /*
2248 * 4.1.4.5.1:
2249 *
2250 * The device MUST reset ISR status to 0 on driver read.
2251 */
2252 d->mmio->isr = 0;
2253 return isr;
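
ISR is the layout's only read-to-clear register; a second read with no new interrupt must return 0. A standalone sketch of that semantic (hypothetical state variable):

#include <assert.h>
#include <stdint.h>

static uint8_t isr_reg;	/* stands in for d->mmio->isr */

static uint8_t read_isr(void)
{
	uint8_t val = isr_reg;

	/* 4.1.4.5.1: the device MUST reset ISR status to 0 on driver read. */
	isr_reg = 0;
	return val;
}

int main(void)
{
	isr_reg = 0x1;			/* queue interrupt pending */
	assert(read_isr() == 0x1);	/* first read reports it... */
	assert(read_isr() == 0);	/* ...and clears it */
	return 0;
}
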
2254 case offsetof(struct virtio_pci_mmio, padding):
2255 bad_driver(d, "read from padding (%#x)", getreg(eip));
2256 default:
2257 /* Read from device config space, beware unaligned overflow */
2258 if (off > d->mmio_size - 4)
2259 bad_driver(d, "read past end (%#x)", getreg(eip));
2260
2261 /*
2262 * 3.1.1:
2263 * The driver MUST follow this sequence to initialize a device:
2264 *...
2265 * 3. Set the DRIVER status bit: the guest OS knows how to
2266 * drive the device.
2267 * 4. Read device feature bits, and write the subset of
2268 * feature bits understood by the OS and driver to the
2269 * device. During this step the driver MAY read (but MUST NOT
2270 * write) the device-specific configuration fields to check
2271 * that it can support the device before accepting it.
2272 */
2273 if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER))
2274 bad_driver(d,
2275 "config read before VIRTIO_CONFIG_S_DRIVER");
2276
2277 if (mask == 0xFFFFFFFF)
2278 goto read_through32;
2279 else if (mask == 0xFFFF)
2280 goto read_through16;
2281 else
2282 goto read_through8;
2283 }
2284
2285 /*
2286 * 4.1.3.1:
2287 *
2288 * The driver MUST access each field using the “natural” access
2289 * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses for
2290 * 16-bit fields and 8-bit accesses for 8-bit fields.
2291 */
2292read_through32:
2293 if (mask != 0xFFFFFFFF)
2294 bad_driver(d, "non-32-bit read to offset %u (%#x)",
2295 off, getreg(eip));
2296 memcpy(&val, (char *)d->mmio + off, 4);
2297 return val;
2298
2299read_through16:
2300 if (mask != 0xFFFF)
2301 bad_driver(d, "non-16-bit read to offset %u (%#x)",
2302 off, getreg(eip));
2303 memcpy(&val, (char *)d->mmio + off, 2);
2304 return val;
2305
2306read_through8:
2307 if (mask != 0xFF)
2308 bad_driver(d, "non-8-bit read to offset %u (%#x)",
2309 off, getreg(eip));
2310 memcpy(&val, (char *)d->mmio + off, 1);
2311 return val;
1165	}
2312	}
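
All six *_through labels enforce 4.1.3.1 the same way: the access mask decoded from the trapped instruction must match the field width exactly, otherwise bad_driver() fires. A condensed standalone model of the check (hypothetical helper, not the patch's code):

#include <assert.h>
#include <stdint.h>
#include <string.h>

static int natural_write(uint8_t *mmio, uint32_t off, uint32_t val,
			 uint32_t mask, uint32_t field_bytes)
{
	if (mask != (field_bytes == 4 ? 0xFFFFFFFF :
		     field_bytes == 2 ? 0xFFFF : 0xFF))
		return -1;	/* bad_driver() in the real code */
	memcpy(mmio + off, &val, field_bytes);
	return 0;
}

int main(void)
{
	uint8_t mmio[16] = { 0 };

	assert(natural_write(mmio, 0, 0x1234, 0xFFFF, 2) == 0);
	assert(natural_write(mmio, 0, 0x12, 0xFFFFFFFF, 1) < 0); /* 32-bit op on 8-bit field */
	return 0;
}
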
1166
1167	/*
1168	 * This routine allocates a new "struct lguest_device_desc" from descriptor
1169	 * table page just above the Guest's normal memory.  It returns a pointer to
1170	 * that descriptor.
1171	 */
1172	static struct lguest_device_desc *new_dev_desc(u16 type)
1173	{
1174		struct lguest_device_desc d = { .type = type };
1175		void *p;
2313
2314	static void emulate_mmio(unsigned long paddr, const u8 *insn)
2315	{
2316		u32 val, off, mask = 0xFFFFFFFF, insnlen = 0;
2317		struct device *d = find_mmio_region(paddr, &off);
2318 unsigned long args[] = { LHREQ_TRAP, 14 };
1176
1177		/* Figure out where the next device config is, based on the last one. */
1178		if (devices.lastdev)
1179			p = device_config(devices.lastdev)
1180				+ devices.lastdev->desc->config_len;
1181		else
1182			p = devices.descpage;
2319
2320		if (!d) {
2321			warnx("MMIO touching %#08lx (not a device)", paddr);
2322			goto reinject;
2323		}
2324
2325		/* Prefix makes it a 16 bit op */
2326 if (insn[0] == 0x66) {
2327 mask = 0xFFFF;
2328 insnlen++;
2329 }
1183
1184		/* We only have one page for all the descriptors. */
1185		if (p + sizeof(d) > (void *)devices.descpage + getpagesize())
1186			errx(1, "Too many devices");
2330
2331		/* iowrite */
2332		if (insn[insnlen] == 0x89) {
2333			/* Next byte is r/m byte: bits 3-5 are register. */
2334 val = getreg_num((insn[insnlen+1] >> 3) & 0x7, mask);
2335 emulate_mmio_write(d, off, val, mask);
2336 insnlen += 2 + insn_displacement_len(insn[insnlen+1]);
2337 } else if (insn[insnlen] == 0x8b) { /* ioread */
2338 /* Next byte is r/m byte: bits 3-5 are register. */
2339 val = emulate_mmio_read(d, off, mask);
2340 setreg_num((insn[insnlen+1] >> 3) & 0x7, val, mask);
2341 insnlen += 2 + insn_displacement_len(insn[insnlen+1]);
2342 } else if (insn[0] == 0x88) { /* 8-bit iowrite */
2343 mask = 0xff;
2344 /* Next byte is r/m byte: bits 3-5 are register. */
2345 val = getreg_num((insn[1] >> 3) & 0x7, mask);
2346 emulate_mmio_write(d, off, val, mask);
2347 insnlen = 2 + insn_displacement_len(insn[1]);
2348 } else if (insn[0] == 0x8a) { /* 8-bit ioread */
2349 mask = 0xff;
2350 val = emulate_mmio_read(d, off, mask);
2351 setreg_num((insn[1] >> 3) & 0x7, val, mask);
2352 insnlen = 2 + insn_displacement_len(insn[1]);
2353 } else {
2354 warnx("Unknown MMIO instruction touching %#08lx:"
2355 " %02x %02x %02x %02x at %u",
2356 paddr, insn[0], insn[1], insn[2], insn[3], getreg(eip));
2357 reinject:
2358 /* Inject trap into Guest. */
2359 if (write(lguest_fd, args, sizeof(args)) < 0)
2360 err(1, "Reinjecting trap 14 for fault at %#x",
2361 getreg(eip));
2362 return;
2363 }
1187
1188		/* p might not be aligned, so we memcpy in. */
1189		return memcpy(p, &d, sizeof(d));
1190	}
2364
2365		/* Finally, we've "done" the instruction, so move past it. */
2366		setreg(eip, getreg(eip) + insnlen);
2367	}
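
The decoder above recognizes the four plain mov forms (0x88/0x89 stores, 0x8a/0x8b loads, optionally behind a 0x66 operand-size prefix) and pulls the register out of bits 3-5 of the ModRM byte. A standalone sketch of the same decode (hypothetical, trimmed to opcode and reg field):

#include <stdint.h>
#include <stdio.h>

static const char *mmio_op(const uint8_t *insn, unsigned int *i)
{
	*i = (insn[0] == 0x66);		/* 0x66 prefix = 16-bit operand */

	switch (insn[*i]) {
	case 0x89: return "iowrite";	/* mov r32/r16 -> memory */
	case 0x8b: return "ioread";	/* mov memory -> r32/r16 */
	case 0x88: return "iowrite8";	/* mov r8 -> memory */
	case 0x8a: return "ioread8";	/* mov memory -> r8 */
	default:   return "unknown";
	}
}

int main(void)
{
	uint8_t insn[] = { 0x66, 0x89, 0x08 };	/* mov %cx, (%eax) */
	unsigned int i;
	const char *op = mmio_op(insn, &i);

	/* reg field is bits 3-5 of the ModRM byte following the opcode. */
	printf("%s reg=%u\n", op, (insn[i + 1] >> 3) & 0x7);	/* iowrite reg=1 */
	return 0;
}
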
1191
1192	/*
1193	 * Each device descriptor is followed by the description of its virtqueues.  We
1194	 * specify how many descriptors the virtqueue is to have.
2368
2369	/*L:190
2370	 * Device Setup
2371	 *
2372 * All devices need a descriptor so the Guest knows it exists, and a "struct
2373 * device" so the Launcher can keep track of it. We have common helper
2374 * routines to allocate and manage them.
1195	 */
1196	static void add_virtqueue(struct device *dev, unsigned int num_descs,
1197				  void (*service)(struct virtqueue *))
1198	{
2375	 */
2376	static void add_pci_virtqueue(struct device *dev,
2377				      void (*service)(struct virtqueue *),
2378				      const char *name)
2379	{
1199 unsigned int pages;
1200 struct virtqueue **i, *vq = malloc(sizeof(*vq)); 2380 struct virtqueue **i, *vq = malloc(sizeof(*vq));
1201 void *p;
1202
1203 /* First we need some memory for this virtqueue. */
1204 pages = (vring_size(num_descs, LGUEST_VRING_ALIGN) + getpagesize() - 1)
1205 / getpagesize();
1206 p = get_pages(pages);
1207 2381
1208 /* Initialize the virtqueue */ 2382 /* Initialize the virtqueue */
1209 vq->next = NULL; 2383 vq->next = NULL;
1210 vq->last_avail_idx = 0; 2384 vq->last_avail_idx = 0;
1211 vq->dev = dev; 2385 vq->dev = dev;
2386 vq->name = name;
1212 2387
1213 /* 2388 /*
1214 * This is the routine the service thread will run, and its Process ID 2389 * This is the routine the service thread will run, and its Process ID
@@ -1218,25 +2393,11 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs,
1218 vq->thread = (pid_t)-1; 2393 vq->thread = (pid_t)-1;
1219 2394
1220 /* Initialize the configuration. */ 2395 /* Initialize the configuration. */
1221 vq->config.num = num_descs; 2396 reset_vq_pci_config(vq);
1222 vq->config.irq = devices.next_irq++; 2397 vq->pci_config.queue_notify_off = 0;
1223 vq->config.pfn = to_guest_phys(p) / getpagesize();
1224
1225 /* Initialize the vring. */
1226 vring_init(&vq->vring, num_descs, p, LGUEST_VRING_ALIGN);
1227
1228 /*
1229 * Append virtqueue to this device's descriptor. We use
1230 * device_config() to get the end of the device's current virtqueues;
1231 * we check that we haven't added any config or feature information
1232 * yet, otherwise we'd be overwriting them.
1233 */
1234 assert(dev->desc->config_len == 0 && dev->desc->feature_len == 0);
1235 memcpy(device_config(dev), &vq->config, sizeof(vq->config));
1236 dev->num_vq++;
1237 dev->desc->num_vq++;
1238 2398
1239 verbose("Virtqueue page %#lx\n", to_guest_phys(p)); 2399 /* Add one to the number of queues */
2400 vq->dev->mmio->cfg.num_queues++;
1240 2401
1241 /* 2402 /*
1242 * Add to tail of list, so dev->vq is first vq, dev->vq->next is 2403 * Add to tail of list, so dev->vq is first vq, dev->vq->next is
@@ -1246,73 +2407,239 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs,
1246 *i = vq; 2407 *i = vq;
1247} 2408}
1248 2409
1249	/*
1250	 * The first half of the feature bitmask is for us to advertise features.  The
1251	 * second half is for the Guest to accept features.
1252	 */
1253	static void add_feature(struct device *dev, unsigned bit)
1254	{
1255		u8 *features = get_feature_bits(dev);
1256
1257		/* We can't extend the feature bits once we've added config bytes */
1258		if (dev->desc->feature_len <= bit / CHAR_BIT) {
1259			assert(dev->desc->config_len == 0);
1260			dev->feature_len = dev->desc->feature_len = (bit/CHAR_BIT) + 1;
1261		}
1262
1263		features[bit / CHAR_BIT] |= (1 << (bit % CHAR_BIT));
2410	/* The Guest accesses the feature bits via the PCI common config MMIO region */
2411	static void add_pci_feature(struct device *dev, unsigned bit)
2412	{
2413		dev->features |= (1ULL << bit);
2414	}
2415
2416	/* For devices with no config. */
2417	static void no_device_config(struct device *dev)
2418	{
2419		dev->mmio_addr = get_mmio_region(dev->mmio_size);
2420
2421		dev->config.bar[0] = dev->mmio_addr;
2422 /* Bottom 4 bits must be zero */
2423	assert(!(dev->config.bar[0] & 0xF));
2424}
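
The 1ULL in add_pci_feature above matters: VIRTIO_F_VERSION_1 is bit 32, and a plain 1 << bit would be undefined on a 32-bit int. A tiny standalone check:

#include <assert.h>
#include <stdint.h>

#define VIRTIO_F_VERSION_1 32	/* first bit of the high 32-bit word */

int main(void)
{
	uint64_t features = 0;

	/* 1ULL forces 64-bit arithmetic before the shift. */
	features |= (1ULL << VIRTIO_F_VERSION_1);
	assert(features >> 32 == 1);
	return 0;
}
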
2425
2426/* This puts the device config into BAR0 */
2427static void set_device_config(struct device *dev, const void *conf, size_t len)
2428{
2429 /* Set up BAR 0 */
2430 dev->mmio_size += len;
2431 dev->mmio = realloc(dev->mmio, dev->mmio_size);
2432 memcpy(dev->mmio + 1, conf, len);
2433
2434 /*
2435 * 4.1.4.6:
2436 *
2437 * The device MUST present at least one VIRTIO_PCI_CAP_DEVICE_CFG
2438 * capability for any device type which has a device-specific
2439 * configuration.
2440 */
2441 /* Hook up device cfg */
2442 dev->config.cfg_access.cap.cap_next
2443 = offsetof(struct pci_config, device);
2444
2445 /*
2446 * 4.1.4.6.1:
2447 *
2448 * The offset for the device-specific configuration MUST be 4-byte
2449 * aligned.
2450 */
2451 assert(dev->config.cfg_access.cap.cap_next % 4 == 0);
2452
2453 /* Fix up device cfg field length. */
2454 dev->config.device.length = len;
2455
2456 /* The rest is the same as the no-config case */
2457 no_device_config(dev);
2458}
2459
2460static void init_cap(struct virtio_pci_cap *cap, size_t caplen, int type,
2461 size_t bar_offset, size_t bar_bytes, u8 next)
2462{
2463 cap->cap_vndr = PCI_CAP_ID_VNDR;
2464 cap->cap_next = next;
2465 cap->cap_len = caplen;
2466 cap->cfg_type = type;
2467 cap->bar = 0;
2468 memset(cap->padding, 0, sizeof(cap->padding));
2469 cap->offset = bar_offset;
2470 cap->length = bar_bytes;
1264	}
2471	}
1265 2472
1266	/*
1267	 * This routine sets the configuration fields for an existing device's
1268	 * descriptor.  It only works for the last device, but that's OK because that's
1269	 * how we use it.
1270	 */
1271	static void set_config(struct device *dev, unsigned len, const void *conf)
1272	{
1273		/* Check we haven't overflowed our single page. */
1274		if (device_config(dev) + len > devices.descpage + getpagesize())
1275			errx(1, "Too many devices");
2473	/*
2474	 * This sets up the pci_config structure, as defined in the virtio 1.0
2475	 * standard (and PCI standard).
2476	 */
2477	static void init_pci_config(struct pci_config *pci, u16 type,
2478				    u8 class, u8 subclass)
2479	{
2480		size_t bar_offset, bar_len;
2481
2482		/*
2483 * 4.1.4.4.1:
2484 *
2485 * The device MUST either present notify_off_multiplier as an even
2486 * power of 2, or present notify_off_multiplier as 0.
2487 *
2488 * 2.1.2:
2489 *
2490 * The device MUST initialize device status to 0 upon reset.
2491 */
2492 memset(pci, 0, sizeof(*pci));
2493
2494 /* 4.1.2.1: Devices MUST have the PCI Vendor ID 0x1AF4 */
2495 pci->vendor_id = 0x1AF4;
2496 /* 4.1.2.1: ... PCI Device ID calculated by adding 0x1040 ... */
2497 pci->device_id = 0x1040 + type;
2498
2499 /*
2500	 * PCI has specific class codes for different types of devices.
2501 * Linux doesn't care, but it's a good clue for people looking
2502 * at the device.
2503 */
2504 pci->class = class;
2505 pci->subclass = subclass;
2506
2507 /*
2508 * 4.1.2.1:
2509 *
2510 * Non-transitional devices SHOULD have a PCI Revision ID of 1 or
2511 * higher
2512 */
2513 pci->revid = 1;
2514
2515 /*
2516 * 4.1.2.1:
2517 *
2518 * Non-transitional devices SHOULD have a PCI Subsystem Device ID of
2519 * 0x40 or higher.
2520 */
2521 pci->subsystem_device_id = 0x40;
2522
2523 /* We use our dummy interrupt controller, and irq_line is the irq */
2524 pci->irq_line = devices.next_irq++;
2525 pci->irq_pin = 0;
2526
2527 /* Support for extended capabilities. */
2528 pci->status = (1 << 4);
2529
2530 /* Link them in. */
2531 /*
2532 * 4.1.4.3.1:
2533 *
2534 * The device MUST present at least one common configuration
2535 * capability.
2536 */
2537 pci->capabilities = offsetof(struct pci_config, common);
2538
2539 /* 4.1.4.3.1 ... offset MUST be 4-byte aligned. */
2540 assert(pci->capabilities % 4 == 0);
2541
2542 bar_offset = offsetof(struct virtio_pci_mmio, cfg);
2543 bar_len = sizeof(((struct virtio_pci_mmio *)0)->cfg);
2544 init_cap(&pci->common, sizeof(pci->common), VIRTIO_PCI_CAP_COMMON_CFG,
2545 bar_offset, bar_len,
2546 offsetof(struct pci_config, notify));
2547
2548 /*
2549 * 4.1.4.4.1:
2550 *
2551 * The device MUST present at least one notification capability.
2552 */
2553 bar_offset += bar_len;
2554 bar_len = sizeof(((struct virtio_pci_mmio *)0)->notify);
2555
2556 /*
2557 * 4.1.4.4.1:
2558 *
2559 * The cap.offset MUST be 2-byte aligned.
2560 */
2561 assert(pci->common.cap_next % 2 == 0);
2562
2563 /* FIXME: Use a non-zero notify_off, for per-queue notification? */
2564 /*
2565 * 4.1.4.4.1:
2566 *
2567 * The value cap.length presented by the device MUST be at least 2 and
2568 * MUST be large enough to support queue notification offsets for all
2569 * supported queues in all possible configurations.
2570 */
2571 assert(bar_len >= 2);
2572
2573 init_cap(&pci->notify.cap, sizeof(pci->notify),
2574 VIRTIO_PCI_CAP_NOTIFY_CFG,
2575 bar_offset, bar_len,
2576 offsetof(struct pci_config, isr));
2577
2578 bar_offset += bar_len;
2579 bar_len = sizeof(((struct virtio_pci_mmio *)0)->isr);
2580 /*
2581 * 4.1.4.5.1:
2582 *
2583 * The device MUST present at least one VIRTIO_PCI_CAP_ISR_CFG
2584 * capability.
2585 */
2586 init_cap(&pci->isr, sizeof(pci->isr),
2587 VIRTIO_PCI_CAP_ISR_CFG,
2588 bar_offset, bar_len,
2589 offsetof(struct pci_config, cfg_access));
2590
2591 /*
2592 * 4.1.4.7.1:
2593 *
2594 * The device MUST present at least one VIRTIO_PCI_CAP_PCI_CFG
2595 * capability.
2596 */
2597 /* This doesn't have any presence in the BAR */
2598 init_cap(&pci->cfg_access.cap, sizeof(pci->cfg_access),
2599 VIRTIO_PCI_CAP_PCI_CFG,
2600 0, 0, 0);
1276
1277		/* Copy in the config information, and store the length. */
1278		memcpy(device_config(dev), conf, len);
1279		dev->desc->config_len = len;
1280
1281		/* Size must fit in config_len field (8 bits)! */
1282		assert(dev->desc->config_len == len);
2601
2602		bar_offset += bar_len + sizeof(((struct virtio_pci_mmio *)0)->padding);
2603		assert(bar_offset == sizeof(struct virtio_pci_mmio));
2604
2605		/*
2606		 * This gets sewn in and length set in set_device_config().
2607 * Some devices don't have a device configuration interface, so
2608 * we never expose this if we don't call set_device_config().
2609 */
2610 init_cap(&pci->device, sizeof(pci->device), VIRTIO_PCI_CAP_DEVICE_CFG,
2611 bar_offset, 0, 0);
1283	}
2612	}
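
The chain built here is an ordinary PCI vendor-specific capability list: cap_next links common -> notify -> isr -> cfg_access, with device spliced in by set_device_config() when there is device config. A standalone sketch of walking such a chain over a flat config-space image (offsets chosen arbitrarily, header trimmed to four bytes):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cap_hdr {			/* mirrors the start of virtio_pci_cap */
	uint8_t cap_vndr, cap_next, cap_len, cfg_type;
};

static void walk_caps(const uint8_t *cfg, uint8_t first)
{
	struct cap_hdr c;

	for (uint8_t off = first; off; off = c.cap_next) {
		memcpy(&c, cfg + off, sizeof(c));
		printf("capability at %#x, cfg_type %u\n", off, c.cfg_type);
	}
}

int main(void)
{
	uint8_t cfg[256] = { 0 };
	struct cap_hdr common = { 0x09, 0x60, sizeof(common), 1 };  /* COMMON_CFG */
	struct cap_hdr notify = { 0x09, 0x00, sizeof(notify), 2 };  /* NOTIFY_CFG */

	memcpy(cfg + 0x40, &common, sizeof(common));	/* 0x09 = PCI_CAP_ID_VNDR */
	memcpy(cfg + 0x60, &notify, sizeof(notify));
	walk_caps(cfg, 0x40);
	return 0;
}
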
1284 2613
1285/* 2614/*
1286 * This routine does all the creation and setup of a new device, including 2615 * This routine does all the creation and setup of a new device, but we don't
1287 * calling new_dev_desc() to allocate the descriptor and device memory. We 2616 * actually place the MMIO region until we know the size (if any) of the
1288 * don't actually start the service threads until later. 2617 * device-specific config. And we don't actually start the service threads
2618 * until later.
1289 * 2619 *
1290 * See what I mean about userspace being boring? 2620 * See what I mean about userspace being boring?
1291 */ 2621 */
1292static struct device *new_device(const char *name, u16 type) 2622static struct device *new_pci_device(const char *name, u16 type,
2623 u8 class, u8 subclass)
1293{ 2624{
1294 struct device *dev = malloc(sizeof(*dev)); 2625 struct device *dev = malloc(sizeof(*dev));
1295 2626
1296 /* Now we populate the fields one at a time. */ 2627 /* Now we populate the fields one at a time. */
1297 dev->desc = new_dev_desc(type);
1298 dev->name = name; 2628 dev->name = name;
1299 dev->vq = NULL; 2629 dev->vq = NULL;
1300 dev->feature_len = 0;
1301 dev->num_vq = 0;
1302 dev->running = false; 2630 dev->running = false;
1303		dev->next = NULL;
2631		dev->wrote_features_ok = false;
2632 dev->mmio_size = sizeof(struct virtio_pci_mmio);
2633 dev->mmio = calloc(1, dev->mmio_size);
2634 dev->features = (u64)1 << VIRTIO_F_VERSION_1;
2635 dev->features_accepted = 0;
1304
1305		/*
1306		 * Append to device list.  Prepending to a single-linked list is
1307		 * easier, but the user expects the devices to be arranged on the bus
1308		 * in command-line order.  The first network device on the command line
1309		 * is eth0, the first block device /dev/vda, etc.
1310		 */
1311		if (devices.lastdev)
1312			devices.lastdev->next = dev;
1313		else
1314			devices.dev = dev;
1315		devices.lastdev = dev;
1316
2636
2637		if (devices.device_num + 1 >= MAX_PCI_DEVICES)
2638			errx(1, "Can only handle 31 PCI devices");
2639
2640		init_pci_config(&dev->config, type, class, subclass);
2641		assert(!devices.pci[devices.device_num+1]);
2642		devices.pci[++devices.device_num] = dev;
2643
1317 return dev; 2644 return dev;
1318} 2645}
@@ -1324,6 +2651,7 @@ static struct device *new_device(const char *name, u16 type)
1324static void setup_console(void) 2651static void setup_console(void)
1325{ 2652{
1326 struct device *dev; 2653 struct device *dev;
2654 struct virtio_console_config conf;
1327 2655
1328 /* If we can save the initial standard input settings... */ 2656 /* If we can save the initial standard input settings... */
1329 if (tcgetattr(STDIN_FILENO, &orig_term) == 0) { 2657 if (tcgetattr(STDIN_FILENO, &orig_term) == 0) {
@@ -1336,7 +2664,7 @@ static void setup_console(void)
1336 tcsetattr(STDIN_FILENO, TCSANOW, &term); 2664 tcsetattr(STDIN_FILENO, TCSANOW, &term);
1337 } 2665 }
1338 2666
1339 dev = new_device("console", VIRTIO_ID_CONSOLE); 2667 dev = new_pci_device("console", VIRTIO_ID_CONSOLE, 0x07, 0x00);
1340 2668
1341 /* We store the console state in dev->priv, and initialize it. */ 2669 /* We store the console state in dev->priv, and initialize it. */
1342 dev->priv = malloc(sizeof(struct console_abort)); 2670 dev->priv = malloc(sizeof(struct console_abort));
@@ -1348,10 +2676,14 @@ static void setup_console(void)
1348 * stdin. When they put something in the output queue, we write it to 2676 * stdin. When they put something in the output queue, we write it to
1349 * stdout. 2677 * stdout.
1350 */ 2678 */
1351 add_virtqueue(dev, VIRTQUEUE_NUM, console_input); 2679 add_pci_virtqueue(dev, console_input, "input");
1352 add_virtqueue(dev, VIRTQUEUE_NUM, console_output); 2680 add_pci_virtqueue(dev, console_output, "output");
2681
2682 /* We need a configuration area for the emerg_wr early writes. */
2683 add_pci_feature(dev, VIRTIO_CONSOLE_F_EMERG_WRITE);
2684 set_device_config(dev, &conf, sizeof(conf));
1353 2685
1354 verbose("device %u: console\n", ++devices.device_num); 2686 verbose("device %u: console\n", devices.device_num);
1355} 2687}
1356/*:*/ 2688/*:*/
1357 2689
@@ -1449,6 +2781,7 @@ static void configure_device(int fd, const char *tapif, u32 ipaddr)
1449static int get_tun_device(char tapif[IFNAMSIZ]) 2781static int get_tun_device(char tapif[IFNAMSIZ])
1450{ 2782{
1451 struct ifreq ifr; 2783 struct ifreq ifr;
2784 int vnet_hdr_sz;
1452 int netfd; 2785 int netfd;
1453 2786
1454 /* Start with this zeroed. Messy but sure. */ 2787 /* Start with this zeroed. Messy but sure. */
@@ -1476,6 +2809,18 @@ static int get_tun_device(char tapif[IFNAMSIZ])
1476 */ 2809 */
1477 ioctl(netfd, TUNSETNOCSUM, 1); 2810 ioctl(netfd, TUNSETNOCSUM, 1);
1478 2811
2812 /*
2813 * In virtio before 1.0 (aka legacy virtio), we added a 16-bit
2814 * field at the end of the network header iff
2815 * VIRTIO_NET_F_MRG_RXBUF was negotiated. For virtio 1.0,
2816 * that became the norm, but we need to tell the tun device
2817 * about our expanded header (which is called
2818 * virtio_net_hdr_mrg_rxbuf in the legacy system).
2819 */
2820 vnet_hdr_sz = sizeof(struct virtio_net_hdr_v1);
2821 if (ioctl(netfd, TUNSETVNETHDRSZ, &vnet_hdr_sz) != 0)
2822 err(1, "Setting tun header size to %u", vnet_hdr_sz);
2823
1479 memcpy(tapif, ifr.ifr_name, IFNAMSIZ); 2824 memcpy(tapif, ifr.ifr_name, IFNAMSIZ);
1480 return netfd; 2825 return netfd;
1481} 2826}
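
The two network header layouts differ only by the trailing num_buffers field, which virtio 1.0 made unconditional; hence the fixed 12-byte TUNSETVNETHDRSZ above. A standalone size check (assuming reasonably recent kernel headers):

#include <linux/virtio_net.h>
#include <stdio.h>

int main(void)
{
	/* 10 bytes legacy, 12 with the now-mandatory num_buffers field. */
	printf("legacy hdr: %zu\n", sizeof(struct virtio_net_hdr));
	printf("virtio 1.0: %zu\n", sizeof(struct virtio_net_hdr_v1));
	return 0;
}
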
@@ -1499,12 +2844,12 @@ static void setup_tun_net(char *arg)
1499 net_info->tunfd = get_tun_device(tapif); 2844 net_info->tunfd = get_tun_device(tapif);
1500 2845
1501 /* First we create a new network device. */ 2846 /* First we create a new network device. */
1502 dev = new_device("net", VIRTIO_ID_NET); 2847 dev = new_pci_device("net", VIRTIO_ID_NET, 0x02, 0x00);
1503 dev->priv = net_info; 2848 dev->priv = net_info;
1504 2849
1505 /* Network devices need a recv and a send queue, just like console. */ 2850 /* Network devices need a recv and a send queue, just like console. */
1506 add_virtqueue(dev, VIRTQUEUE_NUM, net_input); 2851 add_pci_virtqueue(dev, net_input, "rx");
1507 add_virtqueue(dev, VIRTQUEUE_NUM, net_output); 2852 add_pci_virtqueue(dev, net_output, "tx");
1508 2853
1509 /* 2854 /*
1510 * We need a socket to perform the magic network ioctls to bring up the 2855 * We need a socket to perform the magic network ioctls to bring up the
@@ -1524,7 +2869,7 @@ static void setup_tun_net(char *arg)
1524 p = strchr(arg, ':'); 2869 p = strchr(arg, ':');
1525 if (p) { 2870 if (p) {
1526 str2mac(p+1, conf.mac); 2871 str2mac(p+1, conf.mac);
1527 add_feature(dev, VIRTIO_NET_F_MAC); 2872 add_pci_feature(dev, VIRTIO_NET_F_MAC);
1528 *p = '\0'; 2873 *p = '\0';
1529 } 2874 }
1530 2875
@@ -1538,25 +2883,21 @@ static void setup_tun_net(char *arg)
1538 configure_device(ipfd, tapif, ip); 2883 configure_device(ipfd, tapif, ip);
1539 2884
1540 /* Expect Guest to handle everything except UFO */ 2885 /* Expect Guest to handle everything except UFO */
1541 add_feature(dev, VIRTIO_NET_F_CSUM); 2886 add_pci_feature(dev, VIRTIO_NET_F_CSUM);
1542 add_feature(dev, VIRTIO_NET_F_GUEST_CSUM); 2887 add_pci_feature(dev, VIRTIO_NET_F_GUEST_CSUM);
1543 add_feature(dev, VIRTIO_NET_F_GUEST_TSO4); 2888 add_pci_feature(dev, VIRTIO_NET_F_GUEST_TSO4);
1544 add_feature(dev, VIRTIO_NET_F_GUEST_TSO6); 2889 add_pci_feature(dev, VIRTIO_NET_F_GUEST_TSO6);
1545 add_feature(dev, VIRTIO_NET_F_GUEST_ECN); 2890 add_pci_feature(dev, VIRTIO_NET_F_GUEST_ECN);
1546 add_feature(dev, VIRTIO_NET_F_HOST_TSO4); 2891 add_pci_feature(dev, VIRTIO_NET_F_HOST_TSO4);
1547 add_feature(dev, VIRTIO_NET_F_HOST_TSO6); 2892 add_pci_feature(dev, VIRTIO_NET_F_HOST_TSO6);
1548 add_feature(dev, VIRTIO_NET_F_HOST_ECN); 2893 add_pci_feature(dev, VIRTIO_NET_F_HOST_ECN);
1549 /* We handle indirect ring entries */ 2894 /* We handle indirect ring entries */
1550 add_feature(dev, VIRTIO_RING_F_INDIRECT_DESC); 2895 add_pci_feature(dev, VIRTIO_RING_F_INDIRECT_DESC);
1551 /* We're compliant with the damn spec. */ 2896 set_device_config(dev, &conf, sizeof(conf));
1552 add_feature(dev, VIRTIO_F_ANY_LAYOUT);
1553 set_config(dev, sizeof(conf), &conf);
1554 2897
1555 /* We don't need the socket any more; setup is done. */ 2898 /* We don't need the socket any more; setup is done. */
1556 close(ipfd); 2899 close(ipfd);
1557 2900
1558 devices.device_num++;
1559
1560 if (bridging) 2901 if (bridging)
1561 verbose("device %u: tun %s attached to bridge: %s\n", 2902 verbose("device %u: tun %s attached to bridge: %s\n",
1562 devices.device_num, tapif, arg); 2903 devices.device_num, tapif, arg);
@@ -1607,7 +2948,7 @@ static void blk_request(struct virtqueue *vq)
1607 head = wait_for_vq_desc(vq, iov, &out_num, &in_num); 2948 head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
1608 2949
1609 /* Copy the output header from the front of the iov (adjusts iov) */ 2950 /* Copy the output header from the front of the iov (adjusts iov) */
1610 iov_consume(iov, out_num, &out, sizeof(out)); 2951 iov_consume(vq->dev, iov, out_num, &out, sizeof(out));
1611 2952
1612 /* Find and trim end of iov input array, for our status byte. */ 2953 /* Find and trim end of iov input array, for our status byte. */
1613 in = NULL; 2954 in = NULL;
@@ -1619,7 +2960,7 @@ static void blk_request(struct virtqueue *vq)
1619 } 2960 }
1620 } 2961 }
1621 if (!in) 2962 if (!in)
1622 errx(1, "Bad virtblk cmd with no room for status"); 2963 bad_driver_vq(vq, "Bad virtblk cmd with no room for status");
1623 2964
1624 /* 2965 /*
1625 * For historical reasons, block operations are expressed in 512 byte 2966 * For historical reasons, block operations are expressed in 512 byte
@@ -1627,15 +2968,7 @@ static void blk_request(struct virtqueue *vq)
1627 */ 2968 */
1628 off = out.sector * 512; 2969 off = out.sector * 512;
1629 2970
1630 /* 2971 if (out.type & VIRTIO_BLK_T_OUT) {
1631 * In general the virtio block driver is allowed to try SCSI commands.
1632 * It'd be nice if we supported eject, for example, but we don't.
1633 */
1634 if (out.type & VIRTIO_BLK_T_SCSI_CMD) {
1635 fprintf(stderr, "Scsi commands unsupported\n");
1636 *in = VIRTIO_BLK_S_UNSUPP;
1637 wlen = sizeof(*in);
1638 } else if (out.type & VIRTIO_BLK_T_OUT) {
1639 /* 2972 /*
1640 * Write 2973 * Write
1641 * 2974 *
@@ -1657,7 +2990,7 @@ static void blk_request(struct virtqueue *vq)
1657 /* Trim it back to the correct length */ 2990 /* Trim it back to the correct length */
1658 ftruncate64(vblk->fd, vblk->len); 2991 ftruncate64(vblk->fd, vblk->len);
1659 /* Die, bad Guest, die. */ 2992 /* Die, bad Guest, die. */
1660 errx(1, "Write past end %llu+%u", off, ret); 2993 bad_driver_vq(vq, "Write past end %llu+%u", off, ret);
1661 } 2994 }
1662 2995
1663 wlen = sizeof(*in); 2996 wlen = sizeof(*in);
@@ -1699,11 +3032,11 @@ static void setup_block_file(const char *filename)
1699 struct vblk_info *vblk; 3032 struct vblk_info *vblk;
1700 struct virtio_blk_config conf; 3033 struct virtio_blk_config conf;
1701 3034
1702 /* Creat the device. */ 3035 /* Create the device. */
1703 dev = new_device("block", VIRTIO_ID_BLOCK); 3036 dev = new_pci_device("block", VIRTIO_ID_BLOCK, 0x01, 0x80);
1704 3037
1705 /* The device has one virtqueue, where the Guest places requests. */ 3038 /* The device has one virtqueue, where the Guest places requests. */
1706 add_virtqueue(dev, VIRTQUEUE_NUM, blk_request); 3039 add_pci_virtqueue(dev, blk_request, "request");
1707 3040
1708 /* Allocate the room for our own bookkeeping */ 3041 /* Allocate the room for our own bookkeeping */
1709 vblk = dev->priv = malloc(sizeof(*vblk)); 3042 vblk = dev->priv = malloc(sizeof(*vblk));
@@ -1712,9 +3045,6 @@ static void setup_block_file(const char *filename)
1712 vblk->fd = open_or_die(filename, O_RDWR|O_LARGEFILE); 3045 vblk->fd = open_or_die(filename, O_RDWR|O_LARGEFILE);
1713 vblk->len = lseek64(vblk->fd, 0, SEEK_END); 3046 vblk->len = lseek64(vblk->fd, 0, SEEK_END);
1714 3047
1715 /* We support FLUSH. */
1716 add_feature(dev, VIRTIO_BLK_F_FLUSH);
1717
1718 /* Tell Guest how many sectors this device has. */ 3048 /* Tell Guest how many sectors this device has. */
1719 conf.capacity = cpu_to_le64(vblk->len / 512); 3049 conf.capacity = cpu_to_le64(vblk->len / 512);
1720 3050
@@ -1722,20 +3052,19 @@ static void setup_block_file(const char *filename)
1722 * Tell Guest not to put in too many descriptors at once: two are used 3052 * Tell Guest not to put in too many descriptors at once: two are used
1723 * for the in and out elements. 3053 * for the in and out elements.
1724 */ 3054 */
1725 add_feature(dev, VIRTIO_BLK_F_SEG_MAX); 3055 add_pci_feature(dev, VIRTIO_BLK_F_SEG_MAX);
1726 conf.seg_max = cpu_to_le32(VIRTQUEUE_NUM - 2); 3056 conf.seg_max = cpu_to_le32(VIRTQUEUE_NUM - 2);
1727 3057
1728 /* Don't try to put whole struct: we have 8 bit limit. */ 3058 set_device_config(dev, &conf, sizeof(struct virtio_blk_config));
1729 set_config(dev, offsetof(struct virtio_blk_config, geometry), &conf);
1730 3059
1731 verbose("device %u: virtblock %llu sectors\n", 3060 verbose("device %u: virtblock %llu sectors\n",
1732 ++devices.device_num, le64_to_cpu(conf.capacity)); 3061 devices.device_num, le64_to_cpu(conf.capacity));
1733} 3062}
1734 3063
1735/*L:211 3064/*L:211
1736 * Our random number generator device reads from /dev/random into the Guest's 3065 * Our random number generator device reads from /dev/urandom into the Guest's
1737 * input buffers. The usual case is that the Guest doesn't want random numbers 3066 * input buffers. The usual case is that the Guest doesn't want random numbers
1738 * and so has no buffers although /dev/random is still readable, whereas 3067 * and so has no buffers although /dev/urandom is still readable, whereas
1739 * console is the reverse. 3068 * console is the reverse.
1740 * 3069 *
1741 * The same logic applies, however. 3070 * The same logic applies, however.
@@ -1754,7 +3083,7 @@ static void rng_input(struct virtqueue *vq)
1754 /* First we need a buffer from the Guests's virtqueue. */ 3083 /* First we need a buffer from the Guests's virtqueue. */
1755 head = wait_for_vq_desc(vq, iov, &out_num, &in_num); 3084 head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
1756 if (out_num) 3085 if (out_num)
1757 errx(1, "Output buffers in rng?"); 3086 bad_driver_vq(vq, "Output buffers in rng?");
1758 3087
1759 /* 3088 /*
1760 * Just like the console write, we loop to cover the whole iovec. 3089 * Just like the console write, we loop to cover the whole iovec.
@@ -1763,8 +3092,8 @@ static void rng_input(struct virtqueue *vq)
1763 while (!iov_empty(iov, in_num)) { 3092 while (!iov_empty(iov, in_num)) {
1764 len = readv(rng_info->rfd, iov, in_num); 3093 len = readv(rng_info->rfd, iov, in_num);
1765 if (len <= 0) 3094 if (len <= 0)
1766 err(1, "Read from /dev/random gave %i", len); 3095 err(1, "Read from /dev/urandom gave %i", len);
1767 iov_consume(iov, in_num, NULL, len); 3096 iov_consume(vq->dev, iov, in_num, NULL, len);
1768 totlen += len; 3097 totlen += len;
1769 } 3098 }
1770 3099
@@ -1780,17 +3109,20 @@ static void setup_rng(void)
1780 struct device *dev; 3109 struct device *dev;
1781 struct rng_info *rng_info = malloc(sizeof(*rng_info)); 3110 struct rng_info *rng_info = malloc(sizeof(*rng_info));
1782 3111
1783 /* Our device's privat info simply contains the /dev/random fd. */ 3112 /* Our device's private info simply contains the /dev/urandom fd. */
1784 rng_info->rfd = open_or_die("/dev/random", O_RDONLY); 3113 rng_info->rfd = open_or_die("/dev/urandom", O_RDONLY);
1785 3114
1786 /* Create the new device. */ 3115 /* Create the new device. */
1787 dev = new_device("rng", VIRTIO_ID_RNG); 3116 dev = new_pci_device("rng", VIRTIO_ID_RNG, 0xff, 0);
1788 dev->priv = rng_info; 3117 dev->priv = rng_info;
1789 3118
1790 /* The device has one virtqueue, where the Guest places inbufs. */ 3119 /* The device has one virtqueue, where the Guest places inbufs. */
1791 add_virtqueue(dev, VIRTQUEUE_NUM, rng_input); 3120 add_pci_virtqueue(dev, rng_input, "input");
1792 3121
1793 verbose("device %u: rng\n", devices.device_num++); 3122 /* We don't have any configuration space */
3123 no_device_config(dev);
3124
3125 verbose("device %u: rng\n", devices.device_num);
1794} 3126}
1795/* That's the end of device setup. */ 3127/* That's the end of device setup. */
1796 3128
@@ -1820,17 +3152,23 @@ static void __attribute__((noreturn)) restart_guest(void)
1820static void __attribute__((noreturn)) run_guest(void) 3152static void __attribute__((noreturn)) run_guest(void)
1821{ 3153{
1822 for (;;) { 3154 for (;;) {
1823 unsigned long notify_addr; 3155 struct lguest_pending notify;
1824 int readval; 3156 int readval;
1825 3157
1826 /* We read from the /dev/lguest device to run the Guest. */ 3158 /* We read from the /dev/lguest device to run the Guest. */
1827			readval = pread(lguest_fd, &notify_addr,
1828					sizeof(notify_addr), cpu_id);
1829
1830			/* One unsigned long means the Guest did HCALL_NOTIFY */
1831			if (readval == sizeof(notify_addr)) {
1832				verbose("Notify on address %#lx\n", notify_addr);
1833				handle_output(notify_addr);
3159			readval = pread(lguest_fd, &notify, sizeof(notify), cpu_id);
3160			if (readval == sizeof(notify)) {
3161				if (notify.trap == 13) {
3162					verbose("Emulating instruction at %#x\n",
3163						getreg(eip));
3164					emulate_insn(notify.insn);
3165				} else if (notify.trap == 14) {
3166 verbose("Emulating MMIO at %#x\n",
3167 getreg(eip));
3168 emulate_mmio(notify.addr, notify.insn);
3169 } else
3170 errx(1, "Unknown trap %i addr %#08x\n",
3171 notify.trap, notify.addr);
1834 /* ENOENT means the Guest died. Reading tells us why. */ 3172 /* ENOENT means the Guest died. Reading tells us why. */
1835 } else if (errno == ENOENT) { 3173 } else if (errno == ENOENT) {
1836 char reason[1024] = { 0 }; 3174 char reason[1024] = { 0 };
@@ -1893,11 +3231,9 @@ int main(int argc, char *argv[])
1893 main_args = argv; 3231 main_args = argv;
1894 3232
1895 /* 3233 /*
1896	 * First we initialize the device list.  We keep a pointer to the last
1897	 * device, and the next interrupt number to use for devices (1:
1898	 * remember that 0 is used by the timer).
1899	 */
1900	devices.lastdev = NULL;
3234	 * First we initialize the device list.  We remember next interrupt
3235	 * number to use for devices (1: remember that 0 is used by the timer).
3236	 */
1901 devices.next_irq = 1; 3237 devices.next_irq = 1;
1902 3238
1903 /* We're CPU 0. In fact, that's the only CPU possible right now. */ 3239 /* We're CPU 0. In fact, that's the only CPU possible right now. */
@@ -1921,12 +3257,14 @@ int main(int argc, char *argv[])
1921 guest_base = map_zeroed_pages(mem / getpagesize() 3257 guest_base = map_zeroed_pages(mem / getpagesize()
1922 + DEVICE_PAGES); 3258 + DEVICE_PAGES);
1923 guest_limit = mem; 3259 guest_limit = mem;
1924 guest_max = mem + DEVICE_PAGES*getpagesize(); 3260 guest_max = guest_mmio = mem + DEVICE_PAGES*getpagesize();
1925 devices.descpage = get_pages(1);
1926 break; 3261 break;
1927 } 3262 }
1928 } 3263 }
1929 3264
3265 /* We always have a console device, and it's always device 1. */
3266 setup_console();
3267
1930 /* The options are fairly straight-forward */ 3268 /* The options are fairly straight-forward */
1931 while ((c = getopt_long(argc, argv, "v", opts, NULL)) != EOF) { 3269 while ((c = getopt_long(argc, argv, "v", opts, NULL)) != EOF) {
1932 switch (c) { 3270 switch (c) {
@@ -1967,8 +3305,8 @@ int main(int argc, char *argv[])
1967 3305
1968 verbose("Guest base is at %p\n", guest_base); 3306 verbose("Guest base is at %p\n", guest_base);
1969 3307
1970 /* We always have a console device */ 3308 /* Initialize the (fake) PCI host bridge device. */
1971 setup_console(); 3309 init_pci_host_bridge();
1972 3310
1973 /* Now we load the kernel */ 3311 /* Now we load the kernel */
1974 start = load_kernel(open_or_die(argv[optind+1], O_RDONLY)); 3312 start = load_kernel(open_or_die(argv[optind+1], O_RDONLY));
diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c
index 6c14afe8c1b1..db1d3a29d97f 100644
--- a/tools/perf/bench/mem-memcpy.c
+++ b/tools/perf/bench/mem-memcpy.c
@@ -289,7 +289,7 @@ static u64 do_memcpy_cycle(const struct routine *r, size_t len, bool prefault)
289 memcpy_t fn = r->fn.memcpy; 289 memcpy_t fn = r->fn.memcpy;
290 int i; 290 int i;
291 291
292 memcpy_alloc_mem(&src, &dst, len); 292 memcpy_alloc_mem(&dst, &src, len);
293 293
294 if (prefault) 294 if (prefault)
295 fn(dst, src, len); 295 fn(dst, src, len);
@@ -312,7 +312,7 @@ static double do_memcpy_gettimeofday(const struct routine *r, size_t len,
312 void *src = NULL, *dst = NULL; 312 void *src = NULL, *dst = NULL;
313 int i; 313 int i;
314 314
315 memcpy_alloc_mem(&src, &dst, len); 315 memcpy_alloc_mem(&dst, &src, len);
316 316
317 if (prefault) 317 if (prefault)
318 fn(dst, src, len); 318 fn(dst, src, len);
diff --git a/tools/perf/config/Makefile.arch b/tools/perf/config/Makefile.arch
index ff95a68741d1..ac8721ffa6c8 100644
--- a/tools/perf/config/Makefile.arch
+++ b/tools/perf/config/Makefile.arch
@@ -21,6 +21,10 @@ ifeq ($(RAW_ARCH),x86_64)
21 endif 21 endif
22endif 22endif
23 23
24	ifeq ($(RAW_ARCH),sparc64)
25	ARCH ?= sparc
26	endif
27
24ARCH ?= $(RAW_ARCH) 28ARCH ?= $(RAW_ARCH)
25 29
26LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1) 30LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
diff --git a/tools/perf/config/feature-checks/Makefile b/tools/perf/config/feature-checks/Makefile
index 42ac05aaf8ac..b32ff3372514 100644
--- a/tools/perf/config/feature-checks/Makefile
+++ b/tools/perf/config/feature-checks/Makefile
@@ -49,7 +49,7 @@ test-hello.bin:
49 $(BUILD) 49 $(BUILD)
50 50
51test-pthread-attr-setaffinity-np.bin: 51test-pthread-attr-setaffinity-np.bin:
52 $(BUILD) -Werror -lpthread 52 $(BUILD) -D_GNU_SOURCE -Werror -lpthread
53 53
54test-stackprotector-all.bin: 54test-stackprotector-all.bin:
55 $(BUILD) -Werror -fstack-protector-all 55 $(BUILD) -Werror -fstack-protector-all
diff --git a/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c b/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c
index 0a0d3ecb4e8a..2b81b72eca23 100644
--- a/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c
+++ b/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c
@@ -5,10 +5,11 @@ int main(void)
5{ 5{
6 int ret = 0; 6 int ret = 0;
7 pthread_attr_t thread_attr; 7 pthread_attr_t thread_attr;
8 cpu_set_t cs;
8 9
9 pthread_attr_init(&thread_attr); 10 pthread_attr_init(&thread_attr);
10 /* don't care abt exact args, just the API itself in libpthread */ 11 /* don't care abt exact args, just the API itself in libpthread */
11 ret = pthread_attr_setaffinity_np(&thread_attr, 0, NULL); 12 ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cs), &cs);
12 13
13 return ret; 14 return ret;
14} 15}
diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
index 47b78b3f0325..6da965bdbc2c 100644
--- a/tools/perf/util/cloexec.c
+++ b/tools/perf/util/cloexec.c
@@ -25,6 +25,10 @@ static int perf_flag_probe(void)
25 if (cpu < 0) 25 if (cpu < 0)
26 cpu = 0; 26 cpu = 0;
27 27
28 /*
29 * Using -1 for the pid is a workaround to avoid gratuitous jump label
30 * changes.
31 */
28 while (1) { 32 while (1) {
29 /* check cloexec flag */ 33 /* check cloexec flag */
30 fd = sys_perf_event_open(&attr, pid, cpu, -1, 34 fd = sys_perf_event_open(&attr, pid, cpu, -1,
@@ -47,16 +51,24 @@ static int perf_flag_probe(void)
47 err, strerror_r(err, sbuf, sizeof(sbuf))); 51 err, strerror_r(err, sbuf, sizeof(sbuf)));
48 52
49 /* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */ 53 /* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */
50		fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
54		while (1) {
55 fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
56 if (fd < 0 && pid == -1 && errno == EACCES) {
57 pid = 0;
58 continue;
59 }
60 break;
61 }
51 err = errno; 62 err = errno;
52 63
64 if (fd >= 0)
65 close(fd);
66
53 if (WARN_ONCE(fd < 0 && err != EBUSY, 67 if (WARN_ONCE(fd < 0 && err != EBUSY,
54 "perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n", 68 "perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n",
55 err, strerror_r(err, sbuf, sizeof(sbuf)))) 69 err, strerror_r(err, sbuf, sizeof(sbuf))))
56 return -1; 70 return -1;
57 71
58 close(fd);
59
60 return 0; 72 return 0;
61} 73}
62 74
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index c94a9e03ecf1..e99a67632831 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -28,7 +28,7 @@ struct perf_mmap {
28 int mask; 28 int mask;
29 int refcnt; 29 int refcnt;
30 unsigned int prev; 30 unsigned int prev;
31 char event_copy[PERF_SAMPLE_MAX_SIZE]; 31 char event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8)));
32}; 32};
33 33
34struct perf_evlist { 34struct perf_evlist {
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index b24f9d8727a8..33b7a2aef713 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -11,6 +11,11 @@
11#include <symbol/kallsyms.h> 11#include <symbol/kallsyms.h>
12#include "debug.h" 12#include "debug.h"
13 13
14#ifndef EM_AARCH64
15#define EM_AARCH64 183 /* ARM 64 bit */
16#endif
17
18
14#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT 19#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
15extern char *cplus_demangle(const char *, int); 20extern char *cplus_demangle(const char *, int);
16 21
diff --git a/tools/thermal/tmon/.gitignore b/tools/thermal/tmon/.gitignore
new file mode 100644
index 000000000000..06e96be65276
--- /dev/null
+++ b/tools/thermal/tmon/.gitignore
@@ -0,0 +1 @@
/tmon
diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile
index e775adcbd29f..0788621c8d76 100644
--- a/tools/thermal/tmon/Makefile
+++ b/tools/thermal/tmon/Makefile
@@ -2,8 +2,8 @@ VERSION = 1.0
2 2
3BINDIR=usr/bin 3BINDIR=usr/bin
4WARNFLAGS=-Wall -Wshadow -W -Wformat -Wimplicit-function-declaration -Wimplicit-int 4WARNFLAGS=-Wall -Wshadow -W -Wformat -Wimplicit-function-declaration -Wimplicit-int
5CFLAGS= -O1 ${WARNFLAGS} -fstack-protector 5CFLAGS+= -O1 ${WARNFLAGS} -fstack-protector
6CC=gcc 6CC=$(CROSS_COMPILE)gcc
7 7
8CFLAGS+=-D VERSION=\"$(VERSION)\" 8CFLAGS+=-D VERSION=\"$(VERSION)\"
9LDFLAGS+= 9LDFLAGS+=
@@ -16,12 +16,21 @@ INSTALL_CONFIGFILE=install -m 644 -p
16CONFIG_FILE= 16CONFIG_FILE=
17CONFIG_PATH= 17CONFIG_PATH=
18 18
19# Static builds might require -ltinfo, for instance
20ifneq ($(findstring -static, $(LDFLAGS)),)
21STATIC := --static
22endif
23
24TMON_LIBS=-lm -lpthread
25TMON_LIBS += $(shell pkg-config --libs $(STATIC) panelw ncursesw 2> /dev/null || \
26 pkg-config --libs $(STATIC) panel ncurses 2> /dev/null || \
27 echo -lpanel -lncurses)
19 28
20OBJS = tmon.o tui.o sysfs.o pid.o 29OBJS = tmon.o tui.o sysfs.o pid.o
21OBJS += 30OBJS +=
22 31
23tmon: $(OBJS) Makefile tmon.h 32tmon: $(OBJS) Makefile tmon.h
24 $(CC) ${CFLAGS} $(LDFLAGS) $(OBJS) -o $(TARGET) -lm -lpanel -lncursesw -ltinfo -lpthread 33 $(CC) $(CFLAGS) $(LDFLAGS) $(OBJS) -o $(TARGET) $(TMON_LIBS)
25 34
26valgrind: tmon 35valgrind: tmon
27 sudo valgrind -v --track-origins=yes --tool=memcheck --leak-check=yes --show-reachable=yes --num-callers=20 --track-fds=yes ./$(TARGET) 1> /dev/null 36 sudo valgrind -v --track-origins=yes --tool=memcheck --leak-check=yes --show-reachable=yes --num-callers=20 --track-fds=yes ./$(TARGET) 1> /dev/null
diff --git a/tools/thermal/tmon/tmon.8 b/tools/thermal/tmon/tmon.8
index 0be727cb9892..02d5179803aa 100644
--- a/tools/thermal/tmon/tmon.8
+++ b/tools/thermal/tmon/tmon.8
@@ -55,6 +55,8 @@ The \fB-l --log\fP option write data to /var/tmp/tmon.log
55.PP 55.PP
56The \fB-t --time-interval\fP option sets the polling interval in seconds 56The \fB-t --time-interval\fP option sets the polling interval in seconds
57.PP 57.PP
58The \fB-T --target-temp\fP option sets the initial target temperature
59.PP
58The \fB-v --version\fP option shows the version of \fBtmon \fP 60The \fB-v --version\fP option shows the version of \fBtmon \fP
59.PP 61.PP
60 The \fB-z --zone\fP option sets the target thermal zone instance to be controlled 62
diff --git a/tools/thermal/tmon/tmon.c b/tools/thermal/tmon/tmon.c
index 09b7c3218334..9aa19652e8e8 100644
--- a/tools/thermal/tmon/tmon.c
+++ b/tools/thermal/tmon/tmon.c
@@ -64,6 +64,7 @@ void usage()
64 printf(" -h, --help show this help message\n"); 64 printf(" -h, --help show this help message\n");
65 printf(" -l, --log log data to /var/tmp/tmon.log\n"); 65 printf(" -l, --log log data to /var/tmp/tmon.log\n");
66 printf(" -t, --time-interval sampling time interval, > 1 sec.\n"); 66 printf(" -t, --time-interval sampling time interval, > 1 sec.\n");
67 printf(" -T, --target-temp initial target temperature\n");
67 printf(" -v, --version show version\n"); 68 printf(" -v, --version show version\n");
68 printf(" -z, --zone target thermal zone id\n"); 69 printf(" -z, --zone target thermal zone id\n");
69 70
@@ -219,6 +220,7 @@ static struct option opts[] = {
219 { "control", 1, NULL, 'c' }, 220 { "control", 1, NULL, 'c' },
220 { "daemon", 0, NULL, 'd' }, 221 { "daemon", 0, NULL, 'd' },
221 { "time-interval", 1, NULL, 't' }, 222 { "time-interval", 1, NULL, 't' },
223 { "target-temp", 1, NULL, 'T' },
222 { "log", 0, NULL, 'l' }, 224 { "log", 0, NULL, 'l' },
223 { "help", 0, NULL, 'h' }, 225 { "help", 0, NULL, 'h' },
224 { "version", 0, NULL, 'v' }, 226 { "version", 0, NULL, 'v' },
@@ -231,7 +233,7 @@ int main(int argc, char **argv)
231{ 233{
232 int err = 0; 234 int err = 0;
233 int id2 = 0, c; 235 int id2 = 0, c;
234 double yk = 0.0; /* controller output */ 236 double yk = 0.0, temp; /* controller output */
235 int target_tz_index; 237 int target_tz_index;
236 238
237 if (geteuid() != 0) { 239 if (geteuid() != 0) {
@@ -239,7 +241,7 @@ int main(int argc, char **argv)
239 exit(EXIT_FAILURE); 241 exit(EXIT_FAILURE);
240 } 242 }
241 243
242 while ((c = getopt_long(argc, argv, "c:dlht:vgz:", opts, &id2)) != -1) { 244 while ((c = getopt_long(argc, argv, "c:dlht:T:vgz:", opts, &id2)) != -1) {
243 switch (c) { 245 switch (c) {
244 case 'c': 246 case 'c':
245 no_control = 0; 247 no_control = 0;
@@ -254,6 +256,14 @@ int main(int argc, char **argv)
 			if (ticktime < 1)
 				ticktime = 1;
 			break;
+		case 'T':
+			temp = strtod(optarg, NULL);
+			if (temp < 0) {
+				fprintf(stderr, "error: temperature must be positive\n");
+				return 1;
+			}
+			target_temp_user = temp;
+			break;
 		case 'l':
 			printf("Logging data to /var/tmp/tmon.log\n");
 			logging = 1;
diff --git a/tools/thermal/tmon/tui.c b/tools/thermal/tmon/tui.c
index 89f8ef0e15c8..b5d1c6b22dd3 100644
--- a/tools/thermal/tmon/tui.c
+++ b/tools/thermal/tmon/tui.c
@@ -30,6 +30,18 @@
 
 #include "tmon.h"
 
+#define min(x, y) ({ \
+	typeof(x) _min1 = (x); \
+	typeof(y) _min2 = (y); \
+	(void) (&_min1 == &_min2); \
+	_min1 < _min2 ? _min1 : _min2; })
+
+#define max(x, y) ({ \
+	typeof(x) _max1 = (x); \
+	typeof(y) _max2 = (y); \
+	(void) (&_max1 == &_max2); \
+	_max1 > _max2 ? _max1 : _max2; })
+
 static PANEL *data_panel;
 static PANEL *dialogue_panel;
 static PANEL *top;
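These are the usual kernel-style min()/max() macros: the GCC statement-expression form evaluates each argument exactly once, and the `(void) (&_min1 == &_min2)` comparison exists only so the compiler warns when the two arguments have incompatible types (comparing pointers to incompatible types is a constraint violation). A small demonstration of the single-evaluation property, assuming a GCC-compatible compiler since typeof and ({ ... }) are extensions:

    #include <stdio.h>

    #define min(x, y) ({ \
        typeof(x) _min1 = (x); \
        typeof(y) _min2 = (y); \
        (void) (&_min1 == &_min2); \
        _min1 < _min2 ? _min1 : _min2; })

    #define NAIVE_MIN(x, y) ((x) < (y) ? (x) : (y))

    static int calls;
    static int next(void) { return ++calls; }

    int main(void)
    {
        int v;

        calls = 0;
        v = min(next(), 3);             /* next() runs exactly once */
        printf("min=%d calls=%d\n", v, calls);    /* min=1 calls=1 */

        calls = 0;
        v = NAIVE_MIN(next(), 3);       /* next() runs twice: once in the
                                         * test, once in the selected arm */
        printf("naive=%d calls=%d\n", v, calls);  /* naive=2 calls=2 */
        return 0;
    }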
@@ -98,6 +110,18 @@ void write_status_bar(int x, char *line)
 	wrefresh(status_bar_window);
 }
 
+/* wrap at 5 */
+#define DIAG_DEV_ROWS 5
+/*
+ * list cooling devices + "set temp" entry; wraps after 5 rows, if they fit
+ */
+static int diag_dev_rows(void)
+{
+	int entries = ptdata.nr_cooling_dev + 1;
+	int rows = max(DIAG_DEV_ROWS, (entries + 1) / 2);
+	return min(rows, entries);
+}
+
 void setup_windows(void)
 {
 	int y_begin = 1;
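diag_dev_rows() bounds the dialogue's row count: entries counts every cooling device plus the trailing "set temp" line, (entries + 1) / 2 is a ceiling division that splits them into two columns, the max() keeps at least the historical 5 rows, and the min() stops the window from being taller than the entry count itself. A standalone restatement of that arithmetic, just to make the numbers concrete:

    #include <stdio.h>

    /* same arithmetic as diag_dev_rows(), parameterized for illustration */
    static int rows_for(int nr_cooling_dev)
    {
        int entries = nr_cooling_dev + 1;   /* + the "set temp" entry */
        int rows = (entries + 1) / 2;       /* ceil(entries / 2) */

        if (rows < 5)                       /* DIAG_DEV_ROWS floor */
            rows = 5;
        return rows < entries ? rows : entries;
    }

    int main(void)
    {
        printf("%d\n", rows_for(2));    /* 3: short list, one short column */
        printf("%d\n", rows_for(8));    /* 5: nine entries wrap at the floor */
        printf("%d\n", rows_for(12));   /* 7: thirteen entries, 7-row columns */
        return 0;
    }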
@@ -122,7 +146,7 @@ void setup_windows(void)
 	 * dialogue window is a pop-up, when needed it lays on top of cdev win
 	 */
 
-	dialogue_window = subwin(stdscr, ptdata.nr_cooling_dev+5, maxx-50,
+	dialogue_window = subwin(stdscr, diag_dev_rows() + 5, maxx-50,
 				DIAG_Y, DIAG_X);
 
 	thermal_data_window = subwin(stdscr, ptdata.nr_tz_sensor *
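Sizing the pop-up from diag_dev_rows() instead of the raw cooling-device count is the payoff of the helper: on a machine with many cooling devices the old `nr_cooling_dev + 5` request could exceed the terminal height, in which case subwin() fails and returns NULL, whereas the wrapped two-column layout keeps the requested height bounded.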
@@ -258,21 +282,26 @@ void show_cooling_device(void)
 }
 
 const char DIAG_TITLE[] = "[ TUNABLES ]";
-#define DIAG_DEV_ROWS 5
 void show_dialogue(void)
 {
 	int j, x = 0, y = 0;
+	int rows, cols;
 	WINDOW *w = dialogue_window;
 
 	if (tui_disabled || !w)
 		return;
 
+	getmaxyx(w, rows, cols);
+
+	/* Silence compiler 'unused' warnings */
+	(void)cols;
+
 	werase(w);
 	box(w, 0, 0);
 	mvwprintw(w, 0, maxx/4, DIAG_TITLE);
 	/* list all the available tunables */
 	for (j = 0; j <= ptdata.nr_cooling_dev; j++) {
-		y = j % DIAG_DEV_ROWS;
+		y = j % diag_dev_rows();
 		if (y == 0 && j != 0)
 			x += 20;
 		if (j == ptdata.nr_cooling_dev)
@@ -283,12 +312,10 @@ void show_dialogue(void)
 			ptdata.cdi[j].type, ptdata.cdi[j].instance);
 	}
 	wattron(w, A_BOLD);
-	mvwprintw(w, DIAG_DEV_ROWS+1, 1, "Enter Choice [A-Z]?");
+	mvwprintw(w, diag_dev_rows()+1, 1, "Enter Choice [A-Z]?");
 	wattroff(w, A_BOLD);
-	/* y size of dialogue win is nr cdev + 5, so print legend
-	 * at the bottom line
-	 */
-	mvwprintw(w, ptdata.nr_cooling_dev+3, 1,
+	/* print legend at the bottom line */
+	mvwprintw(w, rows - 2, 1,
 		"Legend: A=Active, P=Passive, C=Critical");
 
 	wrefresh(dialogue_window);
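getmaxyx() is an ncurses macro that assigns the window's dimensions into its two lvalue arguments, which is why rows and cols above are plain ints rather than pointers, and why the otherwise-unused cols needs the (void) cast. With the height in hand, `rows - 2` is the last line inside the frame that box() draws on row 0 and row rows - 1, so the legend stays pinned to the bottom however tall the dialogue turned out. A minimal standalone illustration (a hypothetical program, linked with -lncurses):

    #include <ncurses.h>

    int main(void)
    {
        int rows, cols;
        WINDOW *w;

        initscr();
        w = newwin(10, 40, 2, 4);   /* 10 rows by 40 columns */
        getmaxyx(w, rows, cols);    /* macro: assigns rows = 10, cols = 40 */
        box(w, 0, 0);               /* border occupies first and last row */
        mvwprintw(w, rows - 2, 1, "%dx%d window, legend on row %d",
                  rows, cols, rows - 2);
        wrefresh(w);
        wgetch(w);
        endwin();
        return 0;
    }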
@@ -437,7 +464,7 @@ static void handle_input_choice(int ch)
 		snprintf(buf, sizeof(buf), "New Value for %.10s-%2d: ",
 			ptdata.cdi[cdev_id].type,
 			ptdata.cdi[cdev_id].instance);
-		write_dialogue_win(buf, DIAG_DEV_ROWS+2, 2);
+		write_dialogue_win(buf, diag_dev_rows() + 2, 2);
 		handle_input_val(cdev_id);
 	} else {
 		snprintf(buf, sizeof(buf), "Invalid selection %d", ch);
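The same substitution closes the loop here: the input prompt that used to sit at the fixed DIAG_DEV_ROWS + 2 now follows diag_dev_rows() + 2, so the choice line, the input prompt, and the legend all track the wrapped two-column layout instead of assuming one entry per row.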